Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/abm')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/cls.c   | 283
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/ctrl.c  | 379
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.c  | 363
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.h  | 208
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/qdisc.c | 850
5 files changed, 1657 insertions, 426 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
new file mode 100644
index 000000000000..9852080cf454
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <net/pkt_cls.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_net_repr.h"
+#include "main.h"
+
+struct nfp_abm_u32_match {
+ u32 handle;
+ u32 band;
+ u8 mask;
+ u8 val;
+ struct list_head list;
+};
+
+static bool
+nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
+ __be16 proto, struct netlink_ext_ack *extack)
+{
+ struct tc_u32_key *k;
+ unsigned int tos_off;
+
+ if (knode->exts && tcf_exts_has_actions(knode->exts)) {
+ NL_SET_ERR_MSG_MOD(extack, "action offload not supported");
+ return false;
+ }
+ if (knode->link_handle) {
+ NL_SET_ERR_MSG_MOD(extack, "linking not supported");
+ return false;
+ }
+ if (knode->sel->flags != TC_U32_TERMINAL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "flags must be equal to TC_U32_TERMINAL");
+ return false;
+ }
+ if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
+ knode->sel->offoff || knode->fshift) {
+ NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+ return false;
+ }
+ if (knode->sel->hoff || knode->sel->hmask) {
+ NL_SET_ERR_MSG_MOD(extack, "hashing not supported");
+ return false;
+ }
+ if (knode->val || knode->mask) {
+ NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported");
+ return false;
+ }
+ if (knode->res && knode->res->class) {
+ NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported");
+ return false;
+ }
+ if (knode->res && knode->res->classid >= abm->num_bands) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "classid higher than number of bands");
+ return false;
+ }
+ if (knode->sel->nkeys != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "exactly one key required");
+ return false;
+ }
+
+ switch (proto) {
+ case htons(ETH_P_IP):
+ tos_off = 16;
+ break;
+ case htons(ETH_P_IPV6):
+ tos_off = 20;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol");
+ return false;
+ }
+
+ k = &knode->sel->keys[0];
+ if (k->offmask) {
+ NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+ return false;
+ }
+ if (k->off) {
+ NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched");
+ return false;
+ }
+ if (k->val & ~k->mask) {
+ NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key");
+ return false;
+ }
+ if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) {
+ NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used");
+ nfp_err(abm->app->cpp,
+ "u32 offload: requested mask %x FW can support only %x\n",
+ be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask);
+ return false;
+ }
+
+ return true;
+}
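+
+/* Worked example (a sketch; values assumed for illustration): the single
+ * u32 key matches the first 32-bit word of the IP header. After
+ * be32_to_cpu() the IPv4 TOS byte sits at bits 23:16 (version/IHL occupy
+ * bits 31:24), hence tos_off = 16; the IPv6 Traffic Class straddles bits
+ * 27:20 (version is 31:28), hence tos_off = 20. A key matching class
+ * selector CS5 (DSCP 40, TOS byte 0xa0) over IPv4 could look like:
+ *
+ *	struct tc_u32_key key = {
+ *		.mask = cpu_to_be32(0xe0 << 16), // CS bits of the TOS byte
+ *		.val  = cpu_to_be32(0xa0 << 16), // 0b101 -> CS5
+ *		.off  = 0,                       // start of the IP header
+ *	};
+ */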
+
+/* This filter list -> map conversion is O(n * m); we expect a single-digit
+ * or low double-digit number of prios, and likewise for the filters. Also,
+ * u32 doesn't report stats, so it's really only a setup-time cost.
+ */
+static unsigned int
+nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio)
+{
+ struct nfp_abm_u32_match *iter;
+
+ list_for_each_entry(iter, &alink->dscp_map, list)
+ if ((prio & iter->mask) == iter->val)
+ return iter->band;
+
+ return alink->def_band;
+}
+
+static int nfp_abm_update_band_map(struct nfp_abm_link *alink)
+{
+ unsigned int i, bits_per_prio, prios_per_word, base_shift;
+ struct nfp_abm *abm = alink->abm;
+ u32 field_mask;
+
+ alink->has_prio = !list_empty(&alink->dscp_map);
+
+ bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands));
+ field_mask = (1 << bits_per_prio) - 1;
+ prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio;
+
+ /* FW mask applies from top bits */
+ base_shift = 8 - order_base_2(abm->num_prios);
+
+ for (i = 0; i < abm->num_prios; i++) {
+ unsigned int offset;
+ u32 *word;
+ u8 band;
+
+ word = &alink->prio_map[i / prios_per_word];
+ offset = (i % prios_per_word) * bits_per_prio;
+
+ band = nfp_abm_find_band_for_prio(alink, i << base_shift);
+
+ *word &= ~(field_mask << offset);
+ *word |= band << offset;
+ }
+
+ /* Qdisc offload status may change if has_prio changed */
+ nfp_abm_qdisc_offload_update(alink);
+
+ return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+}
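+
+/* Packing sketch, assuming (hypothetically) num_bands = 4 and
+ * num_prios = 64:
+ *	bits_per_prio  = roundup_pow_of_two(order_base_2(4)) = 2
+ *	field_mask     = (1 << 2) - 1 = 0x3
+ *	prios_per_word = 32 / 2 = 16
+ *	base_shift     = 8 - order_base_2(64) = 2
+ * Priority 17 then lands in prio_map[17 / 16] = prio_map[1] at bit offset
+ * (17 % 16) * 2 = 2, and its band is resolved against the offloaded
+ * filters using the TOS byte value 17 << 2 = 68.
+ */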
+
+static void
+nfp_abm_u32_knode_delete(struct nfp_abm_link *alink,
+ struct tc_cls_u32_knode *knode)
+{
+ struct nfp_abm_u32_match *iter;
+
+ list_for_each_entry(iter, &alink->dscp_map, list)
+ if (iter->handle == knode->handle) {
+ list_del(&iter->list);
+ kfree(iter);
+ nfp_abm_update_band_map(alink);
+ return;
+ }
+}
+
+static int
+nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
+ struct tc_cls_u32_knode *knode,
+ __be16 proto, struct netlink_ext_ack *extack)
+{
+ struct nfp_abm_u32_match *match = NULL, *iter;
+ unsigned int tos_off;
+ u8 mask, val;
+ int err;
+
+ if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
+ goto err_delete;
+
+ tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
+
+ /* Extract the DSCP Class Selector bits */
+ val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff;
+ mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff;
+
+ /* Check that there is no conflicting mapping and find the match by handle */
+ list_for_each_entry(iter, &alink->dscp_map, list) {
+ u32 cmask;
+
+ if (iter->handle == knode->handle) {
+ match = iter;
+ continue;
+ }
+
+ cmask = iter->mask & mask;
+ if ((iter->val & cmask) == (val & cmask) &&
+ iter->band != knode->res->classid) {
+ NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
+ goto err_delete;
+ }
+ }
+
+ if (!match) {
+ match = kzalloc(sizeof(*match), GFP_KERNEL);
+ if (!match)
+ return -ENOMEM;
+ list_add(&match->list, &alink->dscp_map);
+ }
+ match->handle = knode->handle;
+ match->band = knode->res->classid;
+ match->mask = mask;
+ match->val = val;
+
+ err = nfp_abm_update_band_map(alink);
+ if (err)
+ goto err_delete;
+
+ return 0;
+
+err_delete:
+ nfp_abm_u32_knode_delete(alink, knode);
+ return -EOPNOTSUPP;
+}
+
+static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct tc_cls_u32_offload *cls_u32 = type_data;
+ struct nfp_repr *repr = cb_priv;
+ struct nfp_abm_link *alink;
+
+ alink = repr->app_priv;
+
+ if (type != TC_SETUP_CLSU32) {
+ NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+ "only offload of u32 classifier supported");
+ return -EOPNOTSUPP;
+ }
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common))
+ return -EOPNOTSUPP;
+
+ if (cls_u32->common.protocol != htons(ETH_P_IP) &&
+ cls_u32->common.protocol != htons(ETH_P_IPV6)) {
+ NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+ "only IP and IPv6 supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
+
+ switch (cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return nfp_abm_u32_knode_replace(alink, &cls_u32->knode,
+ cls_u32->common.protocol,
+ cls_u32->common.extack);
+ case TC_CLSU32_DELETE_KNODE:
+ nfp_abm_u32_knode_delete(alink, &cls_u32->knode);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ nfp_abm_setup_tc_block_cb,
+ repr, repr, f->extack);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
+ repr);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 3c661f422688..9584f03f3efa 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
+#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
@@ -11,38 +13,58 @@
#include "../nfp_net.h"
#include "main.h"
-#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u"
+#define NFP_NUM_PRIOS_SYM_NAME "_abi_pci_dscp_num_prio_%u"
+#define NFP_NUM_BANDS_SYM_NAME "_abi_pci_dscp_num_band_%u"
+#define NFP_ACT_MASK_SYM_NAME "_abi_nfd_out_q_actions_%u"
+
+#define NFP_RED_SUPPORT_SYM_NAME "_abi_nfd_out_red_offload_%u"
+
+#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u%s"
#define NFP_QLVL_STRIDE 16
#define NFP_QLVL_BLOG_BYTES 0
#define NFP_QLVL_BLOG_PKTS 4
#define NFP_QLVL_THRS 8
+#define NFP_QLVL_ACT 12
-#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats"
+#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats%s"
#define NFP_QMSTAT_STRIDE 32
#define NFP_QMSTAT_NON_STO 0
#define NFP_QMSTAT_STO 8
#define NFP_QMSTAT_DROP 16
#define NFP_QMSTAT_ECN 24
+#define NFP_Q_STAT_SYM_NAME "_abi_nfd_rxq_stats%u%s"
+#define NFP_Q_STAT_STRIDE 16
+#define NFP_Q_STAT_PKTS 0
+#define NFP_Q_STAT_BYTES 8
+
+#define NFP_NET_ABM_MBOX_CMD NFP_NET_CFG_MBOX_SIMPLE_CMD
+#define NFP_NET_ABM_MBOX_RET NFP_NET_CFG_MBOX_SIMPLE_RET
+#define NFP_NET_ABM_MBOX_DATALEN NFP_NET_CFG_MBOX_SIMPLE_VAL
+#define NFP_NET_ABM_MBOX_RESERVED (NFP_NET_CFG_MBOX_SIMPLE_VAL + 4)
+#define NFP_NET_ABM_MBOX_DATA (NFP_NET_CFG_MBOX_SIMPLE_VAL + 8)
+
static int
nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, unsigned int i,
- bool is_u64, u64 *res)
+ unsigned int stride, unsigned int offset, unsigned int band,
+ unsigned int queue, bool is_u64, u64 *res)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
u64 val, sym_offset;
+ unsigned int qid;
u32 val32;
int err;
- sym_offset = (alink->queue_base + i) * stride + offset;
+ qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ sym_offset = qid * stride + offset;
if (is_u64)
err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val);
else
err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32);
if (err) {
- nfp_err(cpp,
- "RED offload reading stat failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n",
+ alink->id, band, queue, alink->queue_base);
return err;
}
@@ -50,175 +72,179 @@ nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
return 0;
}
-static int
-nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, bool is_u64,
- u64 *res)
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
{
- u64 val, sum = 0;
- unsigned int i;
+ struct nfp_cpp *cpp = abm->app->cpp;
+ u64 sym_offset;
int err;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i,
- is_u64, &val);
- if (err)
- return err;
- sum += val;
+ __clear_bit(id, abm->threshold_undef);
+ if (abm->thresholds[id] == val)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val);
+ if (err) {
+ nfp_err(cpp,
+ "RED offload setting level failed on subqueue %d\n",
+ id);
+ return err;
}
- *res = sum;
+ abm->thresholds[id] = val;
return 0;
}
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, u32 val)
{
- struct nfp_cpp *cpp = alink->abm->app->cpp;
+ unsigned int threshold;
+
+ threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
+}
+
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+ enum nfp_abm_q_action act)
+{
+ struct nfp_cpp *cpp = abm->app->cpp;
u64 sym_offset;
int err;
- sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
- err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0,
- sym_offset, val);
+ if (abm->actions[id] == act)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_ACT;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, act);
if (err) {
- nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp,
+ "RED offload setting action failed on subqueue %d\n",
+ id);
return err;
}
+ abm->actions[id] = act;
return 0;
}
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val)
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, enum nfp_abm_q_action act)
+{
+ unsigned int qid;
+
+ qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act);
+}
+
+u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue)
{
- int i, err;
+ unsigned int band;
+ u64 val, sum = 0;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_set_q_lvl(alink, i, val);
- if (err)
- return err;
+ for (band = 0; band < alink->abm->num_bands; band++) {
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_NON_STO,
+ band, queue, true, &val))
+ return 0;
+ sum += val;
}
- return 0;
+ return sum;
}
-u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i)
+u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue)
{
- u64 val;
+ unsigned int band;
+ u64 val, sum = 0;
- if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
- NFP_QMSTAT_NON_STO, i, true, &val))
- return 0;
- return val;
+ for (band = 0; band < alink->abm->num_bands; band++) {
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_STO,
+ band, queue, true, &val))
+ return 0;
+ sum += val;
+ }
+
+ return sum;
}
-u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i)
+static int
+nfp_abm_ctrl_stat_basic(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, unsigned int off, u64 *val)
{
- u64 val;
+ if (!nfp_abm_has_prio(alink->abm)) {
+ if (!band) {
+ unsigned int id = alink->queue_base + queue;
+
+ *val = nn_readq(alink->vnic,
+ NFP_NET_CFG_RXR_STATS(id) + off);
+ } else {
+ *val = 0;
+ }
- if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
- NFP_QMSTAT_STO, i, true, &val))
return 0;
- return val;
+ } else {
+ return nfp_abm_ctrl_stat(alink, alink->abm->q_stats,
+ NFP_Q_STAT_STRIDE, off, band, queue,
+ true, val);
+ }
}
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
- struct nfp_alink_stats *stats)
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, struct nfp_alink_stats *stats)
{
int err;
- stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
+ err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS,
+ &stats->tx_pkts);
+ if (err)
+ return err;
- err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- i, false, &stats->backlog_bytes);
+ err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES,
+ &stats->tx_bytes);
+ if (err)
+ return err;
+
+ err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE,
+ NFP_QLVL_BLOG_BYTES, band, queue, false,
+ &stats->backlog_bytes);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- i, false, &stats->backlog_pkts);
+ band, queue, false, &stats->backlog_pkts);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- i, true, &stats->drops);
+ band, queue, true, &stats->drops);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- i, true, &stats->overlimits);
+ band, queue, true, &stats->overlimits);
}
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats)
-{
- u64 pkts = 0, bytes = 0;
- int i, err;
-
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
- }
- stats->tx_pkts = pkts;
- stats->tx_bytes = bytes;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- false, &stats->backlog_bytes);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- false, &stats->backlog_pkts);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &stats->drops);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &stats->overlimits);
-}
-
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats)
{
int err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- i, true, &xstats->pdrop);
+ band, queue, true, &xstats->pdrop);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- i, true, &xstats->ecn_marked);
-}
-
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats)
-{
- int err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &xstats->pdrop);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &xstats->ecn_marked);
+ band, queue, true, &xstats->ecn_marked);
}
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
@@ -233,10 +259,64 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
NULL, 0, NULL, 0);
}
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
+{
+ struct nfp_net *nn = alink->vnic;
+ unsigned int i;
+ int err;
+
+ /* Write data_len and wipe reserved */
+ nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
+ alink->abm->prio_map_len);
+
+ for (i = 0; i < alink->abm->prio_map_len; i += sizeof(u32))
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
+ packed[i / sizeof(u32)]);
+
+ err = nfp_net_reconfig_mbox(nn,
+ NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+ if (err)
+ nfp_err(alink->abm->app->cpp,
+ "setting DSCP -> VQ map failed with error %d\n", err);
+ return err;
+}
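+
+/* Resulting mailbox layout (offsets relative to nn->tlv_caps.mbox_off; a
+ * sketch assuming the simple-mailbox layout with CMD at +0, RET at +4 and
+ * VAL at +8):
+ *
+ *	+0   NFP_NET_ABM_MBOX_CMD      written by nfp_net_reconfig_mbox()
+ *	+4   NFP_NET_ABM_MBOX_RET      FW return code
+ *	+8   NFP_NET_ABM_MBOX_DATALEN  prio_map_len in bytes
+ *	+12  NFP_NET_ABM_MBOX_RESERVED wiped by the 64-bit DATALEN write
+ *	+16  NFP_NET_ABM_MBOX_DATA     packed priority map words
+ */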
+
+static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct nfp_net *nn = alink->vnic;
+ unsigned int min_mbox_sz;
+
+ if (!nfp_abm_has_prio(alink->abm))
+ return 0;
+
+ min_mbox_sz = NFP_NET_ABM_MBOX_DATA + alink->abm->prio_map_len;
+ if (nn->tlv_caps.mbox_len < min_mbox_sz) {
+ nfp_err(abm->app->pf->cpp, "vNIC mailbox too small for prio offload: %u, need: %u\n",
+ nn->tlv_caps.mbox_len, min_mbox_sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
{
alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
alink->queue_base /= alink->vnic->stride_rx;
+
+ return nfp_abm_ctrl_prio_check_params(alink);
+}
+
+static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm *abm)
+{
+ unsigned int size;
+
+ size = roundup_pow_of_two(order_base_2(abm->num_bands));
+ size = DIV_ROUND_UP(size * abm->num_prios, BITS_PER_BYTE);
+ size = round_up(size, sizeof(u32));
+
+ return size;
+}
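+
+/* Worked example (values assumed): with num_bands = 4 and num_prios = 64,
+ * size = 2 bits * 64 / 8 = 16 bytes, already a multiple of sizeof(u32);
+ * with num_bands = 2 and num_prios = 8, size = 1 bit * 8 / 8 = 1 byte,
+ * rounded up to 4 so whole words can be written to the vNIC mailbox.
+ */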
static const struct nfp_rtsym *
@@ -260,33 +340,86 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
}
static const struct nfp_rtsym *
-nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name,
- unsigned int size)
+nfp_abm_ctrl_find_q_rtsym(struct nfp_abm *abm, const char *name_fmt,
+ size_t size)
{
- return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS);
+ char pf_symbol[64];
+
+ size = array3_size(size, abm->num_bands, NFP_NET_MAX_RX_RINGS);
+ snprintf(pf_symbol, sizeof(pf_symbol), name_fmt,
+ abm->pf_id, nfp_abm_has_prio(abm) ? "_per_band" : "");
+
+ return nfp_abm_ctrl_find_rtsym(abm->app->pf, pf_symbol, size);
}
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
{
struct nfp_pf *pf = abm->app->pf;
const struct nfp_rtsym *sym;
- unsigned int pf_id;
- char pf_symbol[64];
+ int res;
+
+ abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp);
+
+ /* Check if Qdisc offloads are supported */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->red_support = res;
+
+ /* Read count of prios and prio bands */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_BANDS_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->num_bands = res;
+
+ res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_PRIOS_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->num_prios = res;
+
+ /* Read available actions */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_ACT_MASK_SYM_NAME,
+ BIT(NFP_ABM_ACT_MARK_DROP));
+ if (res < 0)
+ return res;
+ abm->action_mask = res;
+
+ abm->prio_map_len = nfp_abm_ctrl_prio_map_size(abm);
+ abm->dscp_mask = GENMASK(7, 8 - order_base_2(abm->num_prios));
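+ /* e.g. num_prios = 64 (assumed) gives dscp_mask = GENMASK(7, 2) = 0xfc,
+ * the full 6-bit DSCP field; num_prios = 8 gives GENMASK(7, 5) = 0xe0,
+ * i.e. only the class selector bits
+ */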
+
+ /* Check values are sane, U16_MAX is arbitrarily chosen as max */
+ if (!is_power_of_2(abm->num_bands) || !is_power_of_2(abm->num_prios) ||
+ abm->num_bands > U16_MAX || abm->num_prios > U16_MAX ||
+ (abm->num_bands == 1) != (abm->num_prios == 1)) {
+ nfp_err(pf->cpp,
+ "invalid priomap description num bands: %u and num prios: %u\n",
+ abm->num_bands, abm->num_prios);
+ return -EINVAL;
+ }
- pf_id = nfp_cppcore_pcie_unit(pf->cpp);
- abm->pf_id = pf_id;
+ /* Find level and stat symbols */
+ if (!abm->red_support)
+ return 0;
- snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id);
- sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE);
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QLVL_SYM_NAME,
+ NFP_QLVL_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->q_lvls = sym;
- snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id);
- sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE);
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QMSTAT_SYM_NAME,
+ NFP_QMSTAT_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->qm_stats = sym;
+ if (nfp_abm_has_prio(abm)) {
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_Q_STAT_SYM_NAME,
+ NFP_Q_STAT_STRIDE);
+ if (IS_ERR(sym))
+ return PTR_ERR(sym);
+ abm->q_stats = sym;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index c0830c0c2c3f..4d4ff5844c47 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -2,14 +2,13 @@
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
#include <linux/slab.h>
-#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
-#include <net/red.h>
#include "../nfpcore/nfp.h"
#include "../nfpcore/nfp_cpp.h"
@@ -28,269 +27,6 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
}
static int
-__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs, u32 init_val)
-{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
- int ret;
-
- ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
- memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
-
- alink->parent = handle;
- alink->num_qdiscs = qs;
- port->tc_offload_cnt = qs;
-
- return ret;
-}
-
-static void
-nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs)
-{
- __nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
-}
-
-static int
-nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- unsigned int i = TC_H_MIN(opt->parent) - 1;
-
- if (opt->parent == TC_H_ROOT)
- i = 0;
- else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
- i = TC_H_MIN(opt->parent) - 1;
- else
- return -EOPNOTSUPP;
-
- if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
- return -EOPNOTSUPP;
-
- return i;
-}
-
-static void
-nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle)
-{
- unsigned int i;
-
- for (i = 0; i < alink->num_qdiscs; i++)
- if (handle == alink->qdiscs[i].handle)
- break;
- if (i == alink->num_qdiscs)
- return;
-
- if (alink->parent == TC_H_ROOT) {
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- } else {
- nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
- memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
- }
-}
-
-static int
-nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- bool existing;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- existing = i >= 0;
-
- if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
- nfp_warn(alink->abm->app->cpp,
- "RED offload failed - unsupported parameters\n");
- err = -EINVAL;
- goto err_destroy;
- }
-
- if (existing) {
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
- else
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- if (err)
- goto err_destroy;
- return 0;
- }
-
- if (opt->parent == TC_H_ROOT) {
- i = 0;
- err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
- opt->set.min);
- } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
- i = TC_H_MIN(opt->parent) - 1;
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- } else {
- return -EINVAL;
- }
- /* Set the handle to try full clean up, in case IO failed */
- alink->qdiscs[i].handle = opt->handle;
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i,
- &alink->qdiscs[i].stats);
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink,
- &alink->qdiscs[i].xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i,
- &alink->qdiscs[i].xstats);
- if (err)
- goto err_destroy;
-
- alink->qdiscs[i].stats.backlog_pkts = 0;
- alink->qdiscs[i].stats.backlog_bytes = 0;
-
- return 0;
-err_destroy:
- /* If the qdisc keeps on living, but we can't offload undo changes */
- if (existing) {
- opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
- opt->set.qstats->backlog -=
- alink->qdiscs[i].stats.backlog_bytes;
- }
- nfp_abm_red_destroy(netdev, alink, opt->handle);
-
- return err;
-}
-
-static void
-nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
- struct tc_qopt_offload_stats *stats)
-{
- _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
- new->tx_pkts - old->tx_pkts);
- stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
- stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
- stats->qstats->overlimits += new->overlimits - old->overlimits;
- stats->qstats->drops += new->drops - old->drops;
-}
-
-static int
-nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_stats *prev_stats;
- struct nfp_alink_stats stats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_stats = &alink->qdiscs[i].stats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
-
- *prev_stats = stats;
-
- return 0;
-}
-
-static int
-nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_xstats *prev_xstats;
- struct nfp_alink_xstats xstats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_xstats = &alink->qdiscs[i].xstats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink, &xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
- if (err)
- return err;
-
- opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
- opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
-
- *prev_xstats = xstats;
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_RED_REPLACE:
- return nfp_abm_red_replace(netdev, alink, opt);
- case TC_RED_DESTROY:
- nfp_abm_red_destroy(netdev, alink, opt->handle);
- return 0;
- case TC_RED_STATS:
- return nfp_abm_red_stats(alink, opt);
- case TC_RED_XSTATS:
- return nfp_abm_red_xstats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
-nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
-{
- struct nfp_alink_stats stats;
- unsigned int i;
- int err;
-
- for (i = 0; i < alink->num_qdiscs; i++) {
- if (alink->qdiscs[i].handle == TC_H_UNSPEC)
- continue;
-
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
- &opt->stats);
- }
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_mq_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_MQ_CREATE:
- nfp_abm_reset_root(netdev, alink, opt->handle,
- alink->total_queues);
- return 0;
- case TC_MQ_DESTROY:
- if (opt->handle == alink->parent)
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- return 0;
- case TC_MQ_STATS:
- return nfp_abm_mq_stats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data)
{
@@ -302,10 +38,16 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
switch (type) {
+ case TC_SETUP_ROOT_QDISC:
+ return nfp_abm_setup_root(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_MQ:
return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_RED:
return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
+ case TC_SETUP_QDISC_GRED:
+ return nfp_abm_setup_tc_gred(netdev, repr->app_priv, type_data);
+ case TC_SETUP_BLOCK:
+ return nfp_abm_setup_cls_block(netdev, repr, type_data);
default:
return -EOPNOTSUPP;
}
@@ -384,7 +126,9 @@ nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink,
reprs = nfp_reprs_get_locked(app, rtype);
WARN(nfp_repr_get_locked(app, reprs, alink->id), "duplicate repr");
+ rtnl_lock();
rcu_assign_pointer(reprs->reprs[alink->id], netdev);
+ rtnl_unlock();
nfp_info(app->cpp, "%s Port %d Representor(%s) created\n",
ptype == NFP_PORT_PF_PORT ? "PCIe" : "Phys",
@@ -410,7 +154,9 @@ nfp_abm_kill_repr(struct nfp_app *app, struct nfp_abm_link *alink,
netdev = nfp_repr_get_locked(app, reprs, alink->id);
if (!netdev)
return;
+ rtnl_lock();
rcu_assign_pointer(reprs->reprs[alink->id], NULL);
+ rtnl_unlock();
synchronize_rcu();
/* Cast to make sure nfp_repr_clean_and_free() takes a nfp_repr */
nfp_repr_clean_and_free((struct nfp_repr *)netdev_priv(netdev));
@@ -461,6 +207,9 @@ static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm)
struct nfp_net *nn;
int err;
+ if (!abm->red_support)
+ return -EOPNOTSUPP;
+
err = nfp_abm_ctrl_qm_enable(abm);
if (err)
return err;
@@ -573,31 +322,34 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
alink->abm = abm;
alink->vnic = nn;
alink->id = id;
- alink->parent = TC_H_ROOT;
alink->total_queues = alink->vnic->max_rx_rings;
- alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
- GFP_KERNEL);
- if (!alink->qdiscs) {
- err = -ENOMEM;
+
+ INIT_LIST_HEAD(&alink->dscp_map);
+
+ err = nfp_abm_ctrl_read_params(alink);
+ if (err)
+ goto err_free_alink;
+
+ alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
+ if (!alink->prio_map)
goto err_free_alink;
- }
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.
*/
err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
if (err < 0)
- goto err_free_qdiscs;
+ goto err_free_priomap;
netif_keep_dst(nn->dp.netdev);
nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
- nfp_abm_ctrl_read_params(alink);
+ INIT_RADIX_TREE(&alink->qdiscs, GFP_KERNEL);
return 0;
-err_free_qdiscs:
- kvfree(alink->qdiscs);
+err_free_priomap:
+ kfree(alink->prio_map);
err_free_alink:
kfree(alink);
return err;
@@ -608,10 +360,20 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
struct nfp_abm_link *alink = nn->app_priv;
nfp_abm_kill_reprs(alink->abm, alink);
- kvfree(alink->qdiscs);
+ WARN(!radix_tree_empty(&alink->qdiscs), "leftover qdiscs\n");
+ kfree(alink->prio_map);
kfree(alink);
}
+static int nfp_abm_vnic_init(struct nfp_app *app, struct nfp_net *nn)
+{
+ struct nfp_abm_link *alink = nn->app_priv;
+
+ if (nfp_abm_has_prio(alink->abm))
+ return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+ return 0;
+}
+
static u64 *
nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data)
{
@@ -659,6 +421,21 @@ nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port,
return data;
}
+static int nfp_abm_fw_init_reset(struct nfp_abm *abm)
+{
+ unsigned int i;
+
+ if (!abm->red_support)
+ return 0;
+
+ for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++) {
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+ __nfp_abm_ctrl_set_q_act(abm, i, NFP_ABM_ACT_DROP);
+ }
+
+ return nfp_abm_ctrl_qm_disable(abm);
+}
+
static int nfp_abm_init(struct nfp_app *app)
{
struct nfp_pf *pf = app->pf;
@@ -690,15 +467,31 @@ static int nfp_abm_init(struct nfp_app *app)
if (err)
goto err_free_abm;
+ err = -ENOMEM;
+ abm->num_thresholds = array_size(abm->num_bands, NFP_NET_MAX_RX_RINGS);
+ abm->threshold_undef = bitmap_zalloc(abm->num_thresholds, GFP_KERNEL);
+ if (!abm->threshold_undef)
+ goto err_free_abm;
+
+ abm->thresholds = kvcalloc(abm->num_thresholds,
+ sizeof(*abm->thresholds), GFP_KERNEL);
+ if (!abm->thresholds)
+ goto err_free_thresh_umap;
+
+ abm->actions = kvcalloc(abm->num_thresholds, sizeof(*abm->actions),
+ GFP_KERNEL);
+ if (!abm->actions)
+ goto err_free_thresh;
+
/* We start in legacy mode, make sure advanced queuing is disabled */
- err = nfp_abm_ctrl_qm_disable(abm);
+ err = nfp_abm_fw_init_reset(abm);
if (err)
- goto err_free_abm;
+ goto err_free_act;
err = -ENOMEM;
reprs = nfp_reprs_alloc(pf->max_data_vnics);
if (!reprs)
- goto err_free_abm;
+ goto err_free_act;
RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);
reprs = nfp_reprs_alloc(pf->max_data_vnics);
@@ -710,6 +503,12 @@ static int nfp_abm_init(struct nfp_app *app)
err_free_phys:
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+err_free_act:
+ kvfree(abm->actions);
+err_free_thresh:
+ kvfree(abm->thresholds);
+err_free_thresh_umap:
+ bitmap_free(abm->threshold_undef);
err_free_abm:
kfree(abm);
app->priv = NULL;
@@ -723,6 +522,9 @@ static void nfp_abm_clean(struct nfp_app *app)
nfp_abm_eswitch_clean_up(abm);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+ bitmap_free(abm->threshold_undef);
+ kvfree(abm->actions);
+ kvfree(abm->thresholds);
kfree(abm);
app->priv = NULL;
}
@@ -736,6 +538,7 @@ const struct nfp_app_type app_abm = {
.vnic_alloc = nfp_abm_vnic_alloc,
.vnic_free = nfp_abm_vnic_free,
+ .vnic_init = nfp_abm_vnic_init,
.port_get_stats = nfp_abm_port_get_stats,
.port_get_stats_count = nfp_abm_port_get_stats_count,
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index f907b7d98917..49749c60885e 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -4,7 +4,19 @@
#ifndef __NFP_ABM_H__
#define __NFP_ABM_H__ 1
+#include <linux/bits.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
#include <net/devlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+/* Dump of 64 PRIOs and 256 REDs seems to take 850us on Xeon v4 @ 2.20GHz;
+ * 2.5ms / 400Hz seems more than sufficient for stats resolution.
+ */
+#define NFP_ABM_STATS_REFRESH_IVAL (2500 * 1000) /* ns */
+
+#define NFP_ABM_LVL_INFINITY S32_MAX
struct nfp_app;
struct nfp_net;
@@ -12,21 +24,64 @@ struct nfp_net;
#define NFP_ABM_PORTID_TYPE GENMASK(23, 16)
#define NFP_ABM_PORTID_ID GENMASK(7, 0)
+/* The possible actions if thresholds are exceeded */
+enum nfp_abm_q_action {
+ /* mark if ECN capable, otherwise drop */
+ NFP_ABM_ACT_MARK_DROP = 0,
+ /* mark if ECN capable, otherwise goto QM */
+ NFP_ABM_ACT_MARK_QUEUE = 1,
+ NFP_ABM_ACT_DROP = 2,
+ NFP_ABM_ACT_QUEUE = 3,
+ NFP_ABM_ACT_NOQUEUE = 4,
+};
+
/**
* struct nfp_abm - ABM NIC app structure
* @app: back pointer to nfp_app
* @pf_id: ID of our PF link
+ *
+ * @red_support: is RED offload supported
+ * @num_prios: number of supported DSCP priorities
+ * @num_bands: number of supported DSCP priority bands
+ * @action_mask: bitmask of supported actions
+ *
+ * @thresholds: current threshold configuration
+ * @threshold_undef: bitmap of thresholds which have not been set
+ * @actions: current FW action configuration
+ * @num_thresholds: number of @thresholds and bits in @threshold_undef
+ *
+ * @prio_map_len: computed length of FW priority map (in bytes)
+ * @dscp_mask: mask FW will apply on DSCP field
+ *
* @eswitch_mode: devlink eswitch mode, advanced functions only visible
* in switchdev mode
+ *
* @q_lvls: queue level control area
* @qm_stats: queue statistics symbol
+ * @q_stats: basic queue statistics (only in per-band case)
*/
struct nfp_abm {
struct nfp_app *app;
unsigned int pf_id;
+
+ unsigned int red_support;
+ unsigned int num_prios;
+ unsigned int num_bands;
+ unsigned int action_mask;
+
+ u32 *thresholds;
+ unsigned long *threshold_undef;
+ u8 *actions;
+ size_t num_thresholds;
+
+ unsigned int prio_map_len;
+ u8 dscp_mask;
+
enum devlink_eswitch_mode eswitch_mode;
+
const struct nfp_rtsym *q_lvls;
const struct nfp_rtsym *qm_stats;
+ const struct nfp_rtsym *q_stats;
};
/**
@@ -57,16 +112,76 @@ struct nfp_alink_xstats {
u64 pdrop;
};
+enum nfp_qdisc_type {
+ NFP_QDISC_NONE = 0,
+ NFP_QDISC_MQ,
+ NFP_QDISC_RED,
+ NFP_QDISC_GRED,
+};
+
+#define NFP_QDISC_UNTRACKED ((struct nfp_qdisc *)1UL)
+
/**
- * struct nfp_red_qdisc - representation of single RED Qdisc
- * @handle: handle of currently offloaded RED Qdisc
- * @stats: statistics from last refresh
- * @xstats: base of extended statistics
+ * struct nfp_qdisc - tracked TC Qdisc
+ * @netdev: netdev on which Qdisc was created
+ * @type: Qdisc type
+ * @handle: handle of this Qdisc
+ * @parent_handle: handle of the parent (unreliable if Qdisc was grafted)
+ * @use_cnt: number of attachment points in the hierarchy
+ * @num_children: current size of the @children array
+ * @children: pointers to children
+ *
+ * @params_ok: parameters of this Qdisc are OK for offload
+ * @offload_mark: offload refresh state - selected for offload
+ * @offloaded: Qdisc is currently offloaded to the HW
+ *
+ * @mq: MQ Qdisc specific parameters and state
+ * @mq.stats: current stats of the MQ Qdisc
+ * @mq.prev_stats: previously reported @mq.stats
+ *
+ * @red: RED Qdisc specific parameters and state
+ * @red.num_bands: Number of valid entries in the @red.band table
+ * @red.band: Per-band array of RED instances
+ * @red.band.ecn: ECN marking is enabled (rather than drop)
+ * @red.band.threshold: ECN marking threshold
+ * @red.band.stats: current stats of the RED Qdisc
+ * @red.band.prev_stats: previously reported @red.band.stats
+ * @red.band.xstats: extended stats for RED - current
+ * @red.band.prev_xstats: extended stats for RED - previously reported
*/
-struct nfp_red_qdisc {
+struct nfp_qdisc {
+ struct net_device *netdev;
+ enum nfp_qdisc_type type;
u32 handle;
- struct nfp_alink_stats stats;
- struct nfp_alink_xstats xstats;
+ u32 parent_handle;
+ unsigned int use_cnt;
+ unsigned int num_children;
+ struct nfp_qdisc **children;
+
+ bool params_ok;
+ bool offload_mark;
+ bool offloaded;
+
+ union {
+ /* NFP_QDISC_MQ */
+ struct {
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ } mq;
+ /* TC_SETUP_QDISC_RED, TC_SETUP_QDISC_GRED */
+ struct {
+ unsigned int num_bands;
+
+ struct {
+ bool ecn;
+ u32 threshold;
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ struct nfp_alink_xstats xstats;
+ struct nfp_alink_xstats prev_xstats;
+ } band[MAX_DPs];
+ } red;
+ };
};
/**
@@ -76,9 +191,17 @@ struct nfp_red_qdisc {
* @id: id of the data vNIC
* @queue_base: id of base to host queue within PCIe (not QC idx)
* @total_queues: number of PF queues
- * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT
- * @num_qdiscs: number of currently used qdiscs
- * @qdiscs: array of qdiscs
+ *
+ * @last_stats_update: ktime of last stats update
+ *
+ * @prio_map: current map of priorities
+ * @has_prio: @prio_map is valid
+ *
+ * @def_band: default band to use
+ * @dscp_map: list of DSCP to band mappings
+ *
+ * @root_qdisc: pointer to the current root of the Qdisc hierarchy
+ * @qdiscs: all qdiscs recorded by major part of the handle
*/
struct nfp_abm_link {
struct nfp_abm *abm;
@@ -86,26 +209,65 @@ struct nfp_abm_link {
unsigned int id;
unsigned int queue_base;
unsigned int total_queues;
- u32 parent;
- unsigned int num_qdiscs;
- struct nfp_red_qdisc *qdiscs;
+
+ u64 last_stats_update;
+
+ u32 *prio_map;
+ bool has_prio;
+
+ u8 def_band;
+ struct list_head dscp_map;
+
+ struct nfp_qdisc *root_qdisc;
+ struct radix_tree_root qdiscs;
};
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
+static inline bool nfp_abm_has_prio(struct nfp_abm *abm)
+{
+ return abm->num_bands > 1;
+}
+
+static inline bool nfp_abm_has_drop(struct nfp_abm *abm)
+{
+ return abm->action_mask & BIT(NFP_ABM_ACT_DROP);
+}
+
+static inline bool nfp_abm_has_mark(struct nfp_abm *abm)
+{
+ return abm->action_mask & BIT(NFP_ABM_ACT_MARK_DROP);
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink);
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt);
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt);
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt);
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt);
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+ struct tc_block_offload *opt);
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i,
- u32 val);
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val);
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, u32 val);
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+ enum nfp_abm_q_action act);
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, enum nfp_abm_q_action act);
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats);
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats);
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i);
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm);
int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm);
+void nfp_abm_prio_map_update(struct nfp_abm *abm);
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
new file mode 100644
index 000000000000..2473fb5f75e5
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/red.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+#include "main.h"
+
+static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
+{
+ return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
+}
+
+static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
+{
+ return qdisc->children[id] &&
+ qdisc->children[id] != NFP_QDISC_UNTRACKED;
+}
+
+static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
+{
+ return rtnl_dereference(*slot);
+}
+
+static void
+nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
+ struct nfp_alink_stats *child)
+{
+ parent->tx_pkts += child->tx_pkts;
+ parent->tx_bytes += child->tx_bytes;
+ parent->backlog_pkts += child->backlog_pkts;
+ parent->backlog_bytes += child->backlog_bytes;
+ parent->overlimits += child->overlimits;
+ parent->drops += child->drops;
+}
+
+static void
+nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ unsigned int i;
+ int err;
+
+ if (!qdisc->offloaded)
+ return;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
+ &qdisc->red.band[i].stats);
+ if (err)
+ nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
+ i, queue, err);
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
+ &qdisc->red.band[i].xstats);
+ if (err)
+ nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
+ i, queue, err);
+ }
+}
+
+static void
+nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ if (qdisc->type != NFP_QDISC_MQ)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i))
+ nfp_abm_stats_update_red(alink, qdisc->children[i], i);
+}
+
+static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
+{
+ alink->last_stats_update = time_now;
+ if (alink->root_qdisc)
+ nfp_abm_stats_update_mq(alink, alink->root_qdisc);
+}
+
+static void nfp_abm_stats_update(struct nfp_abm_link *alink)
+{
+ u64 now;
+
+ /* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
+ * of all their leaves, so we would read the same stat multiple times
+ * for every dump.
+ */
+ now = ktime_get();
+ if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
+ return;
+
+ __nfp_abm_stats_update(alink, now);
+}
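+
+/* With NFP_ABM_STATS_REFRESH_IVAL at 2.5 ms this caps HW stat reads at
+ * roughly 400 refreshes per second per vNIC, no matter how many qdiscs a
+ * single stats dump walks.
+ */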
+
+static void
+nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
+ unsigned int start, unsigned int end)
+{
+ unsigned int i;
+
+ for (i = start; i < end; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i)) {
+ qdisc->children[i]->use_cnt--;
+ qdisc->children[i] = NULL;
+ }
+}
+
+static void
+nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ /* Don't complain when qdisc is getting unlinked */
+ if (qdisc->use_cnt)
+ nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
+ qdisc->handle);
+
+ if (!nfp_abm_qdisc_is_red(qdisc))
+ return;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ qdisc->red.band[i].stats.backlog_pkts = 0;
+ qdisc->red.band[i].stats.backlog_bytes = 0;
+ }
+}
+
+static int
+__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, struct nfp_alink_stats *prev_stats,
+ struct nfp_alink_xstats *prev_xstats)
+{
+ u64 backlog_pkts, backlog_bytes;
+ int err;
+
+ /* Don't touch the backlog, backlog can only be reset after it has
+ * been reported back to the tc qdisc stats.
+ */
+ backlog_pkts = prev_stats->backlog_pkts;
+ backlog_bytes = prev_stats->backlog_bytes;
+
+ err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED stats init (%d, %d) failed with error %d\n",
+ band, queue, err);
+ return err;
+ }
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED xstats init (%d, %d) failed with error %d\n",
+ band, queue, err);
+ return err;
+ }
+
+ prev_stats->backlog_pkts = backlog_pkts;
+ prev_stats->backlog_bytes = backlog_bytes;
+ return 0;
+}
+
+static int
+nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ err = __nfp_abm_stats_init(alink, i, queue,
+ &qdisc->red.band[i].prev_stats,
+ &qdisc->red.band[i].prev_xstats);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ bool good_red, good_gred;
+ unsigned int i;
+
+ good_red = qdisc->type == NFP_QDISC_RED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1 &&
+ !alink->has_prio &&
+ !qdisc->children[0];
+ good_gred = qdisc->type == NFP_QDISC_GRED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1;
+ qdisc->offload_mark = good_red || good_gred;
+
+ /* If we are starting offload, init prev_stats */
+ if (qdisc->offload_mark && !qdisc->offloaded)
+ if (nfp_abm_stats_init(alink, qdisc, queue))
+ qdisc->offload_mark = false;
+
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->abm->num_bands; i++) {
+ enum nfp_abm_q_action act;
+
+ nfp_abm_ctrl_set_q_lvl(alink, i, queue,
+ qdisc->red.band[i].threshold);
+ act = qdisc->red.band[i].ecn ?
+ NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP;
+ nfp_abm_ctrl_set_q_act(alink, i, queue, act);
+ }
+}
+
+static void
+nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++) {
+ struct nfp_qdisc *child = qdisc->children[i];
+
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ nfp_abm_offload_compile_red(alink, child, i);
+ }
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct radix_tree_iter iter;
+ struct nfp_qdisc *qdisc;
+ void __rcu **slot;
+ size_t i;
+
+ /* Mark all thresholds as unconfigured */
+ for (i = 0; i < abm->num_bands; i++)
+ __bitmap_set(abm->threshold_undef,
+ i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
+ alink->total_queues);
+
+ /* Clear offload marks */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ qdisc->offload_mark = false;
+ }
+
+ if (alink->root_qdisc)
+ nfp_abm_offload_compile_mq(alink, alink->root_qdisc);
+
+ /* Refresh offload status */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ if (!qdisc->offload_mark && qdisc->offloaded)
+ nfp_abm_qdisc_offload_stop(alink, qdisc);
+ qdisc->offloaded = qdisc->offload_mark;
+ }
+
+ /* Reset the unconfigured thresholds */
+ for (i = 0; i < abm->num_thresholds; i++)
+ if (test_bit(i, abm->threshold_undef))
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
+ __nfp_abm_stats_update(alink, ktime_get());
+}
+
+static void
+nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct radix_tree_iter iter;
+ unsigned int mq_refs = 0;
+ void __rcu **slot;
+
+ if (!qdisc->use_cnt)
+ return;
+ /* MQ doesn't notify well on destruction; we need special handling of
+ * MQ's children.
+ */
+ if (qdisc->type == NFP_QDISC_MQ &&
+ qdisc == alink->root_qdisc &&
+ netdev->reg_state == NETREG_UNREGISTERING)
+ return;
+
+ /* Count refs held by MQ instances and clear pointers */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
+ unsigned int i;
+
+ if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
+ continue;
+ for (i = 0; i < mq->num_children; i++)
+ if (mq->children[i] == qdisc) {
+ mq->children[i] = NULL;
+ mq_refs++;
+ }
+ }
+
+ WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
+ qdisc->use_cnt, mq_refs);
+}
+
+static void
+nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+
+ if (!qdisc)
+ return;
+ nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
+ WARN_ON(radix_tree_delete(&alink->qdiscs,
+ TC_H_MAJ(qdisc->handle)) != qdisc);
+
+ kfree(qdisc->children);
+ kfree(qdisc);
+
+ port->tc_offload_cnt--;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+ struct nfp_qdisc *qdisc;
+ int err;
+
+ qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
+ if (!qdisc)
+ return NULL;
+
+ if (children) {
+ qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
+ if (!qdisc->children)
+ goto err_free_qdisc;
+ }
+
+ qdisc->netdev = netdev;
+ qdisc->type = type;
+ qdisc->parent_handle = parent_handle;
+ qdisc->handle = handle;
+ qdisc->num_children = children;
+
+ err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "Qdisc insertion into radix tree failed: %d\n", err);
+ goto err_free_child_tbl;
+ }
+
+ port->tc_offload_cnt++;
+ return qdisc;
+
+err_free_child_tbl:
+ kfree(qdisc->children);
+err_free_qdisc:
+ kfree(qdisc);
+ return NULL;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
+{
+ return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
+}
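+
+/* Qdiscs are keyed by the major part of their handle: e.g. (hypothetical
+ * values) handle 0x80010000 ("8001:") is stored under key 0x80010000, and
+ * a lookup with any class handle 8001:N resolves to the same tracked
+ * Qdisc.
+ */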
+
+static int
+nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children, struct nfp_qdisc **qdisc)
+{
+ *qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (*qdisc) {
+ if (WARN_ON((*qdisc)->type != type))
+ return -EINVAL;
+ return 1;
+ }
+
+ *qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
+ children);
+ return *qdisc ? 0 : -ENOMEM;
+}
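+
+/* Caller sketch (hypothetical) showing the return convention: negative on
+ * error, 0 when a new tracking entry was allocated, 1 when the Qdisc was
+ * already tracked and is being replaced:
+ *
+ *	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED,
+ *				    opt->parent, opt->handle, 1, &qdisc);
+ *	if (ret < 0)
+ *		return ret;	// allocation or type-mismatch error
+ *	existing = ret == 1;	// 1: parameters of a known Qdisc change
+ */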
+
+static void
+nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
+ u32 handle)
+{
+ struct nfp_qdisc *qdisc;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return;
+
+ /* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
+ if (alink->root_qdisc == qdisc)
+ qdisc->use_cnt--;
+
+ nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
+ nfp_abm_qdisc_free(netdev, alink, qdisc);
+
+ if (alink->root_qdisc == qdisc) {
+ alink->root_qdisc = NULL;
+ /* Only root change matters, other changes are acted upon on
+ * the graft notification.
+ */
+ nfp_abm_qdisc_offload_update(alink);
+ }
+}
+
+static int
+nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
+ unsigned int id)
+{
+ struct nfp_qdisc *parent, *child;
+
+ parent = nfp_abm_qdisc_find(alink, handle);
+ if (!parent)
+ return 0;
+
+ if (WARN(id >= parent->num_children,
+ "graft child out of bound %d >= %d\n",
+ id, parent->num_children))
+ return -EINVAL;
+
+ nfp_abm_qdisc_unlink_children(parent, id, id + 1);
+
+ child = nfp_abm_qdisc_find(alink, child_handle);
+ if (child)
+ child->use_cnt++;
+ else
+ child = NFP_QDISC_UNTRACKED;
+ parent->children[id] = child;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
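+/* Fold the delta between two snapshots of device counters into the TC
+ * stats structures; callers then advance the "prev" snapshot so each
+ * delta is only reported once.
+ */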
+static void
+nfp_abm_stats_calculate(struct nfp_alink_stats *new,
+ struct nfp_alink_stats *old,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_queue *qstats)
+{
+ _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
+ new->tx_pkts - old->tx_pkts);
+ qstats->qlen += new->backlog_pkts - old->backlog_pkts;
+ qstats->backlog += new->backlog_bytes - old->backlog_bytes;
+ qstats->overlimits += new->overlimits - old->overlimits;
+ qstats->drops += new->drops - old->drops;
+}
+
+static void
+nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
+ struct nfp_alink_xstats *old,
+ struct red_stats *stats)
+{
+ stats->forced_mark += new->ecn_marked - old->ecn_marked;
+ stats->pdrop += new->pdrop - old->pdrop;
+}
+
+static int
+nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_gred_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
+ unsigned int i;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+	/* If the qdisc offload has stopped we may need to adjust the backlog
+	 * counters back, so carry on even if the qdisc is not currently
+	 * offloaded.
+	 */
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ if (!stats->xstats[i])
+ continue;
+
+ nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
+ &qdisc->red.band[i].prev_stats,
+ &stats->bstats[i], &stats->qstats[i]);
+ qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;
+
+ nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
+ &qdisc->red.band[i].prev_xstats,
+ stats->xstats[i]);
+ qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
+ }
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
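+/* Check that the GRED config can be expressed in FW terms: a single
+ * threshold per band (min == max), no GRIO/WRED, and ECN marking vs
+ * drop limited to what the FW capabilities advertise.
+ */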
+static bool
+nfp_abm_gred_check_params(struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_abm *abm = alink->abm;
+ unsigned int i;
+
+ if (opt->set.grio_on || opt->set.wred_on) {
+ nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.dp_def != alink->def_band) {
+ nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
+ alink->def_band, opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.dp_cnt != abm->num_bands) {
+ nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
+ abm->num_bands, opt->parent, opt->handle);
+ return false;
+ }
+
+ for (i = 0; i < abm->num_bands; i++) {
+ struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];
+
+ if (!band->present)
+ return false;
+ if (!band->is_ecn && !nfp_abm_has_drop(abm)) {
+ nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->is_ecn && !nfp_abm_has_mark(abm)) {
+ nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->is_harddrop) {
+ nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->min != band->max) {
+ nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->min > S32_MAX) {
+			nfp_warn(cpp, "GRED offload failed - threshold too large %u > %d (p:%08x h:%08x vq:%d)\n",
+ band->min, S32_MAX, opt->parent, opt->handle,
+ i);
+ return false;
+ }
+ }
+
+ return true;
+}
+
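+/* Install or update GRED tracking state, caching each band's ECN flag
+ * and threshold for the offload when the parameters check out.
+ */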
+static int
+nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ unsigned int i;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
+ opt->handle, 0, &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
+ if (qdisc->params_ok) {
+ qdisc->red.num_bands = opt->set.dp_cnt;
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn;
+ qdisc->red.band[i].threshold = opt->set.tab[i].min;
+ }
+ }
+
+ if (qdisc->use_cnt)
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_GRED_REPLACE:
+ return nfp_abm_gred_replace(netdev, alink, opt);
+ case TC_GRED_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_GRED_STATS:
+ return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (!qdisc || !qdisc->offloaded)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
+ &qdisc->red.band[0].prev_xstats,
+ opt->xstats);
+ qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
+ return 0;
+}
+
+static int
+nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+	/* If the qdisc offload has stopped we may need to adjust the backlog
+	 * counters back, so carry on even if the qdisc is not currently
+	 * offloaded.
+	 */
+
+ nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
+ &qdisc->red.band[0].prev_stats,
+ stats->bstats, stats->qstats);
+ qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_red_check_params(struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_abm *abm = alink->abm;
+
+ if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) {
+ nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) {
+ nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_harddrop) {
+ nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min != opt->set.max) {
+ nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min > NFP_ABM_LVL_INFINITY) {
+		nfp_warn(cpp, "RED offload failed - threshold too large %u > %d (p:%08x h:%08x)\n",
+ opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
+ opt->handle);
+ return false;
+ }
+
+ return true;
+}
+
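+/* Install or update RED tracking state. RED has a single child slot
+ * which must be resynced, since setting a non-zero limit makes the
+ * kernel reset the child.
+ */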
+static int
+nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
+ opt->handle, 1, &qdisc);
+ if (ret < 0)
+ return ret;
+
+	/* If limit != 0 the child gets reset */
+ if (opt->set.limit) {
+ if (nfp_abm_qdisc_child_valid(qdisc, 0))
+ qdisc->children[0]->use_cnt--;
+ qdisc->children[0] = NULL;
+ } else {
+		/* A qdisc that was just allocated without a limit will use
+		 * noop_qdisc, i.e. a black hole.
+		 */
+ if (!ret)
+ qdisc->children[0] = NFP_QDISC_UNTRACKED;
+ }
+
+ qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
+ if (qdisc->params_ok) {
+ qdisc->red.num_bands = 1;
+ qdisc->red.band[0].ecn = opt->set.is_ecn;
+ qdisc->red.band[0].threshold = opt->set.min;
+ }
+
+ if (qdisc->use_cnt == 1)
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
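+/* Entry point for RED offload requests from the TC core. A config which
+ * passes the checks above looks e.g. like (illustrative only, the exact
+ * iproute2 parameters are not dictated by this driver):
+ *   tc qdisc replace dev $DEV root handle 1: \
+ *	red limit 0 min 50000 max 50000 avpkt 1000 burst 50 ecn
+ */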
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_RED_REPLACE:
+ return nfp_abm_red_replace(netdev, alink, opt);
+ case TC_RED_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_RED_STATS:
+ return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
+ case TC_RED_XSTATS:
+ return nfp_abm_red_xstats(alink, opt);
+ case TC_RED_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->child_handle, 0);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
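+/* MQ itself carries no parameters, so it is always offloadable; it only
+ * provides the per-queue child slots.
+ */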
+static int
+nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
+ TC_H_ROOT, opt->handle, alink->total_queues,
+ &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = true;
+ qdisc->offloaded = true;
+ nfp_abm_qdisc_offload_update(alink);
+ return 0;
+}
+
+static int
+nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc, *red;
+ unsigned int i, j;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_update(alink);
+
+ /* MQ stats are summed over the children in the core, so we need
+ * to add up the unreported child values.
+ */
+ memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
+ memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
+
+ for (i = 0; i < qdisc->num_children; i++) {
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
+ continue;
+ red = qdisc->children[i];
+
+ for (j = 0; j < red->red.num_bands; j++) {
+ nfp_abm_stats_propagate(&qdisc->mq.stats,
+ &red->red.band[j].stats);
+ nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
+ &red->red.band[j].prev_stats);
+ }
+ }
+
+ nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
+ stats->bstats, stats->qstats);
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_MQ_CREATE:
+ return nfp_abm_mq_create(netdev, alink, opt);
+ case TC_MQ_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_MQ_STATS:
+ return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
+ case TC_MQ_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->graft_params.child_handle,
+ opt->graft_params.queue);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
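+/* Track which qdisc sits at the root of the interface; ingress roots
+ * are not offloadable.
+ */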
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt)
+{
+ if (opt->ingress)
+ return -EOPNOTSUPP;
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt--;
+ alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt++;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}