Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 247
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 19
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 131
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 224
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 30
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 117
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 79
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 170
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 50
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 75
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h | 15
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 48
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 55
23 files changed, 1154 insertions, 219 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 3631d612aaca..25491edc35ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -578,31 +578,78 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
}
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
+ u8 rx_pause, tx_pause;
+ bool is_pfc_enabled;
+ struct lmac *lmac;
u64 cfg;
if (!cgx)
return;
- if (enable) {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
+ is_pfc_enabled = !rx_pause;
+
+ if (enable) {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
} else {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
}
}
@@ -722,26 +769,6 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
-{
- struct cgx *cgx = cgxd;
- u64 cfg;
-
- if (is_dev_rpm(cgx))
- return 0;
-
- if (!is_lmac_valid(cgx, lmac_id))
- return -ENODEV;
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
- return 0;
-}
-
static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause)
{
@@ -782,21 +809,8 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
if (!is_lmac_valid(cgx, lmac_id))
return;
- if (enable) {
- /* Enable receive pause frames */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
-
- /* Enable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ if (enable) {
/* Set pause time and interval */
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -813,21 +827,120 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ if (!rx_pause)
+ clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+ if (!tx_pause)
+ clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+ /* check if other pfvfs are using flow control */
+ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Receive Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Transmit Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Return as no traffic classes are requested */
+ if (tx_pause && !pfc_en)
+ return 0;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
- /* Disable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ if (rx_pause) {
+ cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ } else {
+ cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
}
+
+ if (tx_pause)
+ cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+ else
+ cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+ /* Write source MAC address which will be filled into PFC packet */
+ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+ return 0;
+}
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+ *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+ return 0;
}
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
@@ -1489,6 +1602,16 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Reserve first entry for default MAC address */
set_bit(0, lmac->mac_to_index_bmap.bmap);
+ lmac->rx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+ if (err)
+ goto err_dmac_bmap_free;
+
+ lmac->tx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+ if (err)
+ goto err_rx_fc_bmap_free;
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1505,6 +1628,10 @@ static int cgx_lmac_init(struct cgx *cgx)
return cgx_lmac_verify_fwi_version(cgx);
err_bitmap_free:
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
kfree(lmac->name);
@@ -1572,6 +1699,8 @@ static struct mac_ops cgx_mac_ops = {
.mac_enadis_ptp_config = cgx_lmac_ptp_config,
.mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
.mac_tx_enable = cgx_lmac_tx_enable,
+ .pfc_config = cgx_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
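The rx_fc_pfvf_bmap/tx_fc_pfvf_bmap bitmaps added above make pause-frame configuration a shared resource across the PF/VFs mapped to one LMAC: verify_lmac_fc_cfg() records each requester's choice by index and refuses a disable while any other PF/VF's bit is still set. A minimal userspace model of that arbitration (a sketch; the names are illustrative stand-ins for the kernel's rsrc_bmap/bitmap helpers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PFVF 64	/* model only; the driver sizes each bitmap to 128 */

static uint64_t rx_fc_bmap;	/* bit i set == PF/VF i wants Rx pause */

static int fc_cfg_verify(int pfvf_idx, bool rx_pause)
{
	if (pfvf_idx >= MAX_PFVF)
		return -1;

	if (rx_pause)
		rx_fc_bmap |= 1ULL << pfvf_idx;
	else
		rx_fc_bmap &= ~(1ULL << pfvf_idx);

	/* After clearing our own bit, any bit left means another PF/VF
	 * still depends on pause frames, so the disable is refused
	 * (-EPERM in the driver).
	 */
	if (!rx_pause && rx_fc_bmap)
		return -1;
	return 0;
}

int main(void)
{
	fc_cfg_verify(0, true);					/* PF 0 enables */
	fc_cfg_verify(5, true);					/* VF 5 enables */
	printf("PF0 disable -> %d\n", fc_cfg_verify(0, false));	/* -1 */
	printf("VF5 disable -> %d\n", fc_cfg_verify(5, false));	/*  0 */
	return 0;
}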
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index ab1e4abdea38..bd2f33a26eee 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -76,6 +76,13 @@
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_SMAC 0x20108
+#define CGXX_SMUX_CBFC_CTL 0x20218
+#define CGXX_SMUX_CBFC_CTL_RX_EN BIT_ULL(0)
+#define CGXX_SMUX_CBFC_CTL_TX_EN BIT_ULL(1)
+#define CGXX_SMUX_CBFC_CTL_DRP_EN BIT_ULL(2)
+#define CGXX_SMUX_CBFC_CTL_BCK_EN BIT_ULL(3)
+#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
@@ -172,4 +179,10 @@ u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
u64 cgx_read_dmac_entry(void *cgxd, int index);
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx);
#endif /* CGX_H */
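CGX_PFC_CLASS_MASK reserves bits 47:32 of CGXX_SMUX_CBFC_CTL for the 16 per-class PFC enables, and cgx_lmac_pfc_config() drops pfc_en into that field with FIELD_SET(). A self-contained sketch of the bit placement, where field_set() is an illustrative stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)

static uint64_t field_set(uint64_t mask, uint64_t val, uint64_t reg)
{
	int shift = __builtin_ctzll(mask);	/* lowest set bit of mask */

	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint64_t cfg = 0xf;		/* RX/TX/DRP/BCK enables set */
	uint16_t pfc_en = 0x00a8;	/* classes 3, 5 and 7 enabled */

	cfg = field_set(CGX_PFC_CLASS_MASK, pfc_en, cfg);
	printf("CBFC_CTL = 0x%016llx\n", (unsigned long long)cfg);
	/* -> 0x000000a80000000f: class bits in [47:32], ctl bits in [3:0] */
	return 0;
}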
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index b33e7d1d0851..f30581bf0688 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -17,6 +17,8 @@
* @resp: command response
* @link_info: link related information
* @mac_to_index_bmap: Mac address to CGX table index mapping
+ * @rx_fc_pfvf_bmap: Receive flow control enabled netdev mapping
+ * @tx_fc_pfvf_bmap: Transmit flow control enabled netdev mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
* @cgx: parent cgx port
@@ -33,6 +35,8 @@ struct lmac {
u64 resp;
struct cgx_link_user_info link_info;
struct rsrc_bmap mac_to_index_bmap;
+ struct rsrc_bmap rx_fc_pfvf_bmap;
+ struct rsrc_bmap tx_fc_pfvf_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
@@ -110,6 +114,12 @@ struct mac_ops {
int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*pfc_config)(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause, u16 pfc_en);
+
+ int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 58e2aeebc14f..550cb11197bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -172,6 +172,8 @@ M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
msg_rsp) \
+M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -609,6 +611,21 @@ struct rpm_stats_rsp {
u64 tx_stats[RPM_TX_STATS_COUNT];
};
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+ u16 pfc_en; /* bitmap indicating pfc enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+ /* NPA mbox message formats */
+
struct npc_set_pkind {
struct mbox_msghdr hdr;
#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
@@ -1603,6 +1620,8 @@ enum cgx_af_status {
LMAC_AF_ERR_INVALID_PARAM = -1101,
LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
LMAC_AF_ERR_PERM_DENIED = -1103,
+ LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
+ LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
};
#endif /* MBOX_H */
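Each M() entry above binds a message ID to its request/response types and, through token pasting elsewhere in the driver, to an rvu_mbox_handler_<name>() function; 0x21F slots the new PFC message into the CGX ID range. A toy model of that ID-to-handler dispatch (names and the trimmed-down struct are illustrative; the real message carries a mbox_msghdr):

#include <stdio.h>

struct cgx_pfc_cfg { unsigned char rx_pause, tx_pause; unsigned short pfc_en; };

static int handle_prio_flow_ctrl_cfg(void *req)
{
	struct cgx_pfc_cfg *cfg = req;

	printf("pfc_en=0x%04x\n", cfg->pfc_en);
	return 0;
}

/* ID -> handler table, the moral equivalent of what M() generates */
struct mbox_op {
	unsigned short id;
	int (*handler)(void *req);
};

static const struct mbox_op mbox_ops[] = {
	{ 0x21F, handle_prio_flow_ctrl_cfg },	/* CGX_PRIO_FLOW_CTRL_CFG */
};

int main(void)
{
	struct cgx_pfc_cfg req = { .rx_pause = 1, .tx_pause = 1, .pfc_en = 0x08 };
	unsigned short id = 0x21F;

	for (unsigned int i = 0; i < sizeof(mbox_ops) / sizeof(mbox_ops[0]); i++)
		if (mbox_ops[i].id == id)
			return mbox_ops[i].handler(&req);
	return -1;
}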
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index e682b7bfde64..67a6821d2dff 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -25,6 +25,9 @@
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_DEVID_CN10K_PTP 0xA09E
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_PTP_BAR_NO 0
@@ -46,10 +49,105 @@
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
#define PTP_TIMESTAMP 0xF20ULL
+#define PTP_CLOCK_SEC 0xFD0ULL
+
+#define CYCLE_MULT 1000
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
+static bool cn10k_ptp_errata(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
+{
+ u64 sec, sec1, nsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ /* check nsec rollover */
+ if (sec1 > sec) {
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec = sec1;
+ }
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ return sec * NSEC_PER_SEC + nsec;
+}
+
+static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
+{
+ return readq(ptp->reg_base + PTP_CLOCK_HI);
+}
+
+static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
+{
+ u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
+ u32 ptp_clock_nsec, cycle_time;
+ int cycle;
+
+ /* Errata:
+ * Issue #1: At the time of 1 sec rollover of the nano-second counter,
+ * the nano-second counter is set to 0. However, it should be set to
+ * (existing counter_value - 10^9).
+ *
+ * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
+ * It should roll over at 0x3B9A_CA00.
+ */
+
+ /* calculate ptp_clock_comp value */
+ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
+ /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
+ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
+ /* cycles per sec */
+ cycles_per_sec = ptp_clock_freq;
+
+ /* check whether ptp nanosecond counter rolls over early */
+ cycle = cycles_per_sec - 1;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ while (ptp_clock_nsec < NSEC_PER_SEC) {
+ if (ptp_clock_nsec == 0x3B9AC9FF)
+ goto calc_adj_comp;
+ cycle++;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ }
+ /* compute nanoseconds lost per second when nsec counter rolls over */
+ ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
+ /* calculate ptp_clock_comp adjustment */
+ if (ns_drift > 0) {
+ adj = comp * ns_drift;
+ adj = adj / 1000000000ULL;
+ }
+ /* speed up the ptp clock to account for nanoseconds lost */
+ comp += adj;
+ return comp;
+
+calc_adj_comp:
+ /* slow down the ptp clock to not rollover early */
+ adj = comp * cycle_time;
+ adj = adj / 1000000000ULL;
+ adj = adj / CYCLE_MULT;
+ comp -= adj;
+
+ return comp;
+}
+
struct ptp *ptp_get(void)
{
struct ptp *ptp = first_ptp_block;
@@ -77,8 +175,8 @@ void ptp_put(struct ptp *ptp)
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
- u64 comp;
- u64 adj;
+ u32 freq, freq_adj;
+ u64 comp, adj;
s64 ppb;
if (scaled_ppm < 0) {
@@ -100,15 +198,22 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
* where tbase is the basic compensation value calculated
* initially in the probe function.
*/
- comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* convert scaled_ppm to ppb */
ppb = 1 + scaled_ppm;
ppb *= 125;
ppb >>= 13;
- adj = comp * ppb;
- adj = div_u64(adj, 1000000000ull);
- comp = neg_adj ? comp - adj : comp + adj;
+ if (cn10k_ptp_errata(ptp)) {
+ /* calculate the new frequency based on ppb */
+ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
+ freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
+ comp = ptp_calc_adjusted_comp(freq);
+ } else {
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+ }
writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
return 0;
@@ -117,7 +222,7 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
/* Return the current PTP clock */
- *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+ *clk = ptp->read_ptp_tstmp(ptp);
return 0;
}
@@ -166,7 +271,11 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ if (cn10k_ptp_errata(ptp))
+ clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
+ else
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+
/* Initial compensation value to start the nanosecs counter */
writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
}
@@ -214,6 +323,12 @@ static int ptp_probe(struct pci_dev *pdev,
if (!first_ptp_block)
first_ptp_block = ptp;
+ spin_lock_init(&ptp->ptp_lock);
+ if (is_ptp_tsfmt_sec_nsec(ptp))
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
+ else
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+
return 0;
error_free:
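ptp_calc_adjusted_comp() above works in 32.32 fixed point: comp = 10^9 * 2^32 / freq is the nanoseconds advanced per clock cycle, and the errata adjustment nudges comp so the nanosecond counter neither rolls over early at 0x3B9AC9FF nor drifts past 10^9. A userspace port of the same arithmetic for experimentation (the 900 MHz input is illustrative, not a silicon spec):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define CYCLE_MULT   1000

static uint64_t calc_adjusted_comp(uint64_t freq)
{
	uint64_t comp, adj, cycle_time, ns_drift;
	uint64_t cycle, nsec;

	comp = (NSEC_PER_SEC << 32) / freq;		/* ns per cycle, 32.32 */
	cycle_time = NSEC_PER_SEC * CYCLE_MULT / freq;	/* scaled cycle time */

	/* walk the last cycles of one second, watching the ns counter */
	for (cycle = freq - 1, nsec = (cycle * comp) >> 32;
	     nsec < NSEC_PER_SEC; nsec = (++cycle * comp) >> 32) {
		if (nsec == 0x3B9AC9FF) {
			/* early rollover: slow the clock down a hair */
			adj = comp * cycle_time / NSEC_PER_SEC / CYCLE_MULT;
			return comp - adj;
		}
	}
	/* late rollover: speed the clock up by the per-second drift */
	ns_drift = nsec - NSEC_PER_SEC;
	adj = comp * ns_drift / NSEC_PER_SEC;
	return comp + adj;
}

int main(void)
{
	uint64_t freq = 900000000ULL;	/* hypothetical 900 MHz PTP clock */
	uint64_t comp = (NSEC_PER_SEC << 32) / freq;

	printf("base comp     = 0x%016llx\n", (unsigned long long)comp);
	printf("adjusted comp = 0x%016llx\n",
	       (unsigned long long)calc_adjusted_comp(freq));
	return 0;
}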
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 1b81a0493cd3..95a955159f40 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -15,6 +15,8 @@
struct ptp {
struct pci_dev *pdev;
void __iomem *reg_base;
+ u64 (*read_ptp_tstmp)(struct ptp *ptp);
+ spinlock_t ptp_lock; /* lock */
u32 clock_rate;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 9ea2f6ac38ec..47e83d7a5804 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -32,6 +32,8 @@ static struct mac_ops rpm_mac_ops = {
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
.mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
.mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -96,11 +98,20 @@ int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
+ struct lmac *lmac;
u64 cfg;
if (!rpm)
return;
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (!lmac)
+ return;
+
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
if (enable) {
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
@@ -122,13 +133,94 @@ int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
return -ENODEV;
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
return 0;
}
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
+ unsigned long pfc_en,
+ bool enable)
+{
+ u64 quanta_offset = 0, quanta_thresh = 0, cfg;
+ int i, shift;
+
+ /* Set pause time and interval */
+ for_each_set_bit(i, &pfc_en, 16) {
+ switch (i) {
+ case 0:
+ case 1:
+ quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH;
+ break;
+ case 2:
+ case 3:
+ quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH;
+ break;
+ case 4:
+ case 5:
+ quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH;
+ break;
+ case 6:
+ case 7:
+ quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH;
+ break;
+ case 8:
+ case 9:
+ quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH;
+ break;
+ case 10:
+ case 11:
+ quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH;
+ break;
+ case 12:
+ case 13:
+ quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH;
+ break;
+ case 14:
+ case 15:
+ quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH;
+ break;
+ }
+
+ if (!quanta_offset || !quanta_thresh)
+ continue;
+
+ shift = (i % 2) ? 1 : 0;
+ cfg = rpm_read(rpm, lmac_id, quanta_offset);
+ if (enable) {
+ cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_offset, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, quanta_thresh);
+ if (enable) {
+ cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_thresh, cfg);
+ }
+}
+
int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause)
{
@@ -152,8 +244,12 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for 802.3X frames */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
} else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
}
@@ -166,56 +262,20 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
rpm_t *rpm = rpmd;
u64 cfg;
- if (enable) {
- /* Enable 802.3 pause frame mode */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable receive pause frames */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Set pause time and interval */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA,
- cfg | RPM_DEFAULT_PAUSE_TIME);
- /* Set pause interval as the hardware default is too short */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_QUANTA_THRESH);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_QUANTA_THRESH,
- cfg | (RPM_DEFAULT_PAUSE_TIME / 2));
-
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Disable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Disable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- }
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -323,3 +383,65 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
cfg &= ~RPMX_RX_TS_PREPEND;
rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
}
+
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ /* reset PFC class quanta and threshold */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ }
+
+ if (tx_pause) {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ } else {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ }
+
+ if (!rx_pause && !tx_pause)
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ else
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+
+ return 0;
+}
+
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
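rpm_cfg_pfc_quanta_thresh() above packs two traffic classes into each CLxy register: the even class occupies bits 15:0 and the odd class bits 31:16. A small model of that packing (illustrative values; the driver programs RPM_DEFAULT_PAUSE_TIME into the quanta registers and half of it into the thresholds):

#include <stdint.h>
#include <stdio.h>

#define RPM_DEFAULT_PAUSE_TIME 0xFFFF

static uint64_t set_class_quanta(uint64_t reg, int class_id, uint16_t quanta)
{
	int shift = (class_id % 2) ? 16 : 0;

	reg &= ~(0xFFFFULL << shift);		/* clear this class's half */
	reg |= (uint64_t)quanta << shift;
	return reg;
}

int main(void)
{
	uint64_t cl01 = 0;

	/* program the class 0 and class 1 halves (illustrative values) */
	cl01 = set_class_quanta(cl01, 0, RPM_DEFAULT_PAUSE_TIME);
	cl01 = set_class_quanta(cl01, 1, RPM_DEFAULT_PAUSE_TIME / 2);
	printf("CL01_PAUSE_QUANTA = 0x%08llx\n", (unsigned long long)cl01);
	/* -> 0x7fffffff: class 1 quanta in [31:16], class 0 in [15:0] */
	return 0;
}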
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index ff580311edd0..9ab8d49dd180 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -33,7 +33,21 @@
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA 0x80B0
+#define RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA 0x80B8
+#define RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA 0x80C0
#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
+#define RPMX_MTI_MAC100X_CL23_QUANTA_THRESH 0x80D0
+#define RPMX_MTI_MAC100X_CL45_QUANTA_THRESH 0x80D8
+#define RPMX_MTI_MAC100X_CL67_QUANTA_THRESH 0x80E0
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA 0x8110
+#define RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA 0x8118
+#define RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA 0x8120
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
+#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
+#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
#define RPMX_CMR_RX_OVR_BP 0x4120
#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
@@ -45,6 +59,18 @@
#define RPM_LMAC_FWI 0xa
#define RPM_TX_EN BIT_ULL(0)
#define RPM_RX_EN BIT_ULL(1)
+#define RPMX_CMRX_PRT_CBFC_CTL 0x5B08
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_RX_SHIFT 33
+#define RPMX_CMRX_PRT_CBFC_CTL_PHYS_BP_SHIFT 16
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_TX_SHIFT 0
+#define RPM_PFC_CLASS_MASK GENMASK_ULL(48, 33)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_PAD_EN BIT_ULL(11)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
@@ -61,4 +87,8 @@ int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 5ed94cfb47d2..513b43ecd5be 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -807,6 +807,9 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 8a7ac5a8b821..9ffe99830e34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -863,6 +863,45 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_pfc = 0, tx_pfc = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+ if (tx_pfc || rx_pfc) {
+ dev_warn(rvu->dev,
+ "Can not configure 802.3X flow control as PFC frames are enabled");
+ return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
@@ -870,11 +909,9 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
int pf = rvu_get_pf(req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ int err = 0;
void *cgxd;
- if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
- return 0;
-
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
@@ -886,13 +923,11 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
mac_ops = get_mac_ops(cgxd);
if (req->set)
- mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
- req->tx_pause, req->rx_pause);
+ err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
else
- mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
- &rsp->tx_pause,
- &rsp->rx_pause);
- return 0;
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+ return err;
}
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
@@ -1079,3 +1114,67 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_8023 = 0, tx_8023 = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+ if (tx_8023 || rx_8023) {
+ dev_warn(rvu->dev,
+ "Can not configure PFC as 802.3X pause frames are enabled");
+ return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+ struct cgx_pfc_cfg *req,
+ struct cgx_pfc_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+ int err;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+ req->rx_pause, req->pfc_en);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 97fb61915379..0fa625e2528e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -296,7 +296,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
struct rvu_hwinfo *hw = rvu->hw;
struct sdp_node_info *sdp_info;
int pkind, pf, vf, lbkid, vfid;
- struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
bool from_vf;
int err;
@@ -326,13 +325,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
- mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
-
- /* By default we enable pause frames */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
- rvu),
- lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -533,7 +525,7 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 15)
+ if ((req->chan_base + req->chan_cnt) > 16)
return -EINVAL;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
@@ -4578,6 +4570,12 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
pfvf->hw_rx_tstamp_en = false;
}
+ /* reset priority flow control config */
+ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+ /* reset 802.3x flow control config */
+ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
@@ -5314,6 +5312,7 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
aq_req.op = NIX_AQ_INSTOP_WRITE;
memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
/* Clear higher layer enable bit in the mid profile, just in case */
aq_req.prof.hl_en = 0;
aq_req.prof_mask.hl_en = 1;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 0048b5946712..d463dc72d80a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -11,4 +11,7 @@ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 66da31f30d3e..b9d7601138ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -222,8 +222,11 @@ EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
+ u16 maxlen;
int err;
+ maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
@@ -233,6 +236,10 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Use max receive length supported by hardware for loopback devices */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ req->maxlen = maxlen;
+
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
return err;
@@ -262,6 +269,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_pause_frm);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
@@ -931,7 +939,11 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (!is_otx2_lbkvf(pfvf->pdev)) {
/* Enable receive CQ backpressure */
aq->cq.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
+#else
aq->cq.bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level is same as cq pass level */
aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
@@ -1036,7 +1048,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
struct nix_lf_alloc_rsp *rsp;
int err;
- pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+ pfvf->qset.xqe_size = pfvf->hw.xqe_size;
/* Get memory to put this msg */
nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
@@ -1049,7 +1061,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
- nixlf->xqe_sz = NIX_XQESZ_W16;
+ nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
* NPA LF attached to this RVU PF/VF.
@@ -1211,7 +1223,11 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
*/
if (pfvf->nix_blkaddr == BLKADDR_NIX1)
aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
aq->aura.nix0_bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level for RQ's Aura */
aq->aura.bp = RQ_BP_LVL_AURA;
@@ -1538,11 +1554,18 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
- req->chan_cnt = 1;
+#ifdef CONFIG_DCB
+ req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
+ req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
+#else
+ req->chan_cnt = 1;
req->bpid_per_chan = 0;
+#endif
+
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_nix_config_bp);
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
@@ -1704,6 +1727,56 @@ out:
}
EXPORT_SYMBOL(otx2_get_max_mtu);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
+ bool tc = !!(features & NETIF_F_HW_TC);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pfvf);
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable TC, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && !tc &&
+ pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+ netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
+ return -EBUSY;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple &&
+ (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE when TC is active, disable TC and retry\n");
+ return -EINVAL;
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc &&
+ (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ netdev_err(netdev,
+ "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 14509fc64cce..c587c14ac2a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -17,6 +17,7 @@
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
+#include <linux/time64.h>
#include <mbox.h>
#include <npc.h>
@@ -178,6 +179,10 @@ struct otx2_hw {
u16 rqpool_cnt;
u16 sqpool_cnt;
+#define OTX2_DEFAULT_RBUF_LEN 2048
+ u16 rbuf_len;
+ u32 xqe_size;
+
/* NPA */
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
@@ -272,6 +277,8 @@ struct otx2_ptp {
u64 thresh;
struct ptp_pin_desc extts_config;
+ u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
+ u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -396,6 +403,11 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
+#ifdef CONFIG_DCB
+ /* PFC */
+ u8 pfc_en;
+ u8 *queue_to_pfc_map;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -863,6 +875,8 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev,
+ netdev_features_t features);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
@@ -876,4 +890,11 @@ int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
+
+#ifdef CONFIG_DCB
+/* DCB support */
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
+int otx2_dcbnl_set_ops(struct net_device *dev);
+#endif
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
new file mode 100644
index 000000000000..723d2506d309
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+{
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int err = 0;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (pfvf->pfc_en) {
+ req->rx_pause = true;
+ req->tx_pause = true;
+ } else {
+ req->rx_pause = false;
+ req->tx_pause = false;
+ }
+ req->pfc_en = pfvf->pfc_en;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pfc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
+ dev_warn(pfvf->dev,
+ "Failed to config PFC\n");
+ err = -EPERM;
+ }
+ }
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ bool pfc_enable)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ struct npa_aq_enq_req *npa_aq;
+ struct nix_aq_enq_req *aq;
+ int err = 0;
+
+ if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
+ dev_warn(pfvf->dev,
+ "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
+ pfvf->queue_to_pfc_map[qidx], qidx);
+ return;
+ }
+
+ if (if_up) {
+ netif_tx_stop_all_queues(pfvf->netdev);
+ netif_carrier_off(pfvf->netdev);
+ }
+
+ pfvf->queue_to_pfc_map[qidx] = vlan_prio;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ aq->cq.bpid = pfvf->bpid[vlan_prio];
+ aq->cq_mask.bpid = GENMASK(8, 0);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!npa_aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+ npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
+ npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
+
+ /* Fill NPA AQ info */
+ npa_aq->aura_id = qidx;
+ npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+ npa_aq->op = NPA_AQ_INSTOP_WRITE;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+out:
+ if (if_up) {
+ netif_carrier_on(pfvf->netdev);
+ netif_tx_start_all_queues(pfvf->netdev);
+ }
+
+ if (err)
+ dev_warn(pfvf->dev,
+ "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ qidx, err);
+}
+
+static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc->pfc_en = pfvf->pfc_en;
+
+ return 0;
+}
+
+static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int err;
+
+ /* Save PFC configuration to interface */
+ pfvf->pfc_en = pfc->pfc_en;
+
+ err = otx2_config_priority_flow_ctrl(pfvf);
+ if (err)
+ return err;
+
+ /* Request Per channel Bpids */
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, true);
+
+ return 0;
+}
+
+static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
+ .ieee_getpfc = otx2_dcbnl_ieee_getpfc,
+ .ieee_setpfc = otx2_dcbnl_ieee_setpfc,
+ .getdcbx = otx2_dcbnl_getdcbx,
+ .setdcbx = otx2_dcbnl_setdcbx,
+};
+
+int otx2_dcbnl_set_ops(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
+ GFP_KERNEL);
+ if (!pfvf->queue_to_pfc_map)
+ return -ENOMEM;
+ dev->dcbnl_ops = &otx2_dcbnl_ops;
+
+ return 0;
+}
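ieee_setpfc() receives an 8-bit pfc_en bitmap (bit i set = PFC on priority i) and, when any bit is set, the driver re-requests backpressure with one channel and one BPID per traffic class instead of a single one for the whole port (the otx2_nix_config_bp() change earlier in this patch). A sketch of that decision; the struct and function names below are illustrative, only IEEE_8021QAZ_MAX_TCS comes from the kernel:

#include <stdint.h>
#include <stdio.h>

#define IEEE_8021QAZ_MAX_TCS 8

struct bp_req {
	int chan_cnt;		/* channels to request backpressure for */
	int bpid_per_chan;	/* one BPID per channel (per priority)? */
};

static struct bp_req build_bp_req(uint8_t pfc_en)
{
	struct bp_req req = {
		/* PFC on: one channel per TC; off: one for the port */
		.chan_cnt = pfc_en ? IEEE_8021QAZ_MAX_TCS : 1,
		.bpid_per_chan = pfc_en ? 1 : 0,
	};
	return req;
}

int main(void)
{
	uint8_t pfc_en = 1 << 3;	/* PFC on priority 3 only */
	struct bp_req req = build_bp_req(pfc_en);

	printf("chan_cnt=%d bpid_per_chan=%d\n",
	       req.chan_cnt, req.bpid_per_chan);	/* -> 8, 1 */
	return 0;
}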
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index d85db90632d6..fc328de5345e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -371,6 +371,8 @@ static void otx2_get_ringparam(struct net_device *netdev,
ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+ kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
+ kernel_ring->cqe_size = pfvf->hw.xqe_size;
}
static int otx2_set_ringparam(struct net_device *netdev,
@@ -379,6 +381,9 @@ static int otx2_set_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u32 rx_buf_len = kernel_ring->rx_buf_len;
+ u32 old_rx_buf_len = pfvf->hw.rbuf_len;
+ u32 xqe_size = kernel_ring->cqe_size;
bool if_up = netif_running(netdev);
struct otx2_qset *qs = &pfvf->qset;
u32 rx_count, tx_count;
@@ -386,6 +391,21 @@ static int otx2_set_ringparam(struct net_device *netdev,
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
+ /* Hardware supports max size of 32k for a receive buffer
+ * and 1536 is typical ethernet frame size.
+ */
+ if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) {
+ netdev_err(netdev,
+ "Receive buffer range is 1536 - 32768");
+ return -EINVAL;
+ }
+
+ if (xqe_size != 128 && xqe_size != 512) {
+ netdev_err(netdev,
+ "Completion event size must be 128 or 512");
+ return -EINVAL;
+ }
+
/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
rx_count = ring->rx_pending;
/* On some silicon variants a skid or reserved CQEs are
@@ -403,7 +423,8 @@ static int otx2_set_ringparam(struct net_device *netdev,
Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
- if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
+ if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
+ rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
return 0;
if (if_up)
@@ -413,6 +434,9 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
+ pfvf->hw.rbuf_len = rx_buf_len;
+ pfvf->hw.xqe_size = xqe_size;
+
if (if_up)
return netdev->netdev_ops->ndo_open(netdev);
@@ -1207,6 +1231,8 @@ end:
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -1326,6 +1352,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,
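The supported_ring_params flags advertised here correspond to ethtool's rx-buf-len and cqe-size ring parameters, so with a new enough ethtool something like "ethtool -G eth0 rx-buf-len 2048 cqe-size 512" (interface name hypothetical) should reach otx2_set_ringparam(). A standalone model of the validation that function applies to the two new fields:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the checks in otx2_set_ringparam(): rx_buf_len must be 0
 * (leave unchanged) or within [1536, 32768], and the completion-queue
 * entry size must be exactly 128 or 512 bytes.
 */
static int validate_ring_params(uint32_t rx_buf_len, uint32_t xqe_size)
{
	if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768))
		return -1;	/* -EINVAL in the driver */
	if (xqe_size != 128 && xqe_size != 512)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", validate_ring_params(2048, 512));	/*  0 */
	printf("%d\n", validate_ring_params(1000, 128));	/* -1 */
	printf("%d\n", validate_ring_params(2048, 256));	/* -1 */
	return 0;
}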
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 77a13fb555fb..54f235c216a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -21,8 +21,10 @@ struct otx2_flow {
u16 entry;
bool is_vf;
u8 rss_ctx_id;
+#define DMAC_FILTER_RULE BIT(0)
+#define PFC_FLOWCTRL_RULE BIT(1)
+ u16 rule_type;
int vf;
- bool dmac_filter;
};
enum dmac_req {
@@ -899,6 +901,9 @@ static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
+#ifdef CONFIG_DCB
+ int vlan_prio, qidx, pfc_rule = 0;
+#endif
struct npc_install_flow_req *req;
int err, vf = 0;
@@ -940,6 +945,24 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
mutex_unlock(&pfvf->mbox.lock);
return -EINVAL;
}
+
+#ifdef CONFIG_DCB
+ /* Identify PFC rule if PFC enabled and ntuple rule is vlan */
+ if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
+ pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
+ vlan_prio = ntohs(req->packet.vlan_tci) &
+ ntohs(req->mask.vlan_tci);
+
+ /* The priority (PCP) is the top 3 bits of the TCI */
+ vlan_prio >>= 13;
+ flow->rule_type |= PFC_FLOWCTRL_RULE;
+ /* Check if PFC enabled for this priority */
+ if (pfvf->pfc_en & BIT(vlan_prio)) {
+ pfc_rule = true;
+ qidx = req->index;
+ }
+ }
+#endif
}
/* ethtool ring_cookie has (VF + 1) for VF */
@@ -951,6 +974,12 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
/* Send message to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
+
+#ifdef CONFIG_DCB
+ if (!err && pfc_rule)
+ otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
+#endif
+
mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -966,7 +995,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
return -ENOMEM;
pf_mac->entry = 0;
- pf_mac->dmac_filter = true;
+ pf_mac->rule_type |= DMAC_FILTER_RULE;
pf_mac->location = pfvf->flow_cfg->max_flows;
memcpy(&pf_mac->flow_spec, &flow->flow_spec,
sizeof(struct ethtool_rx_flow_spec));
@@ -1031,7 +1060,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* Sync dmac filter table with updated fields */
- if (flow->dmac_filter)
+ if (flow->rule_type & DMAC_FILTER_RULE)
return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
flow->entry);
@@ -1052,7 +1081,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (!test_bit(0, &flow_cfg->dmacflt_bmap))
otx2_add_flow_with_pfmac(pfvf, flow);
- flow->dmac_filter = true;
+ flow->rule_type |= DMAC_FILTER_RULE;
flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
fsp->location = flow_cfg->max_flows + flow->entry;
@@ -1120,7 +1149,7 @@ static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
bool found = false;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter && iter->entry == 0) {
+ if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
if (req == DMAC_ADDR_DEL) {
otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
@@ -1156,7 +1185,7 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
if (!flow)
return -ENOENT;
- if (flow->dmac_filter) {
+ if (flow->rule_type & DMAC_FILTER_RULE) {
struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* user not allowed to remove dmac filter with interface mac */
@@ -1174,6 +1203,13 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
flow_cfg->dmacflt_max_flows) == 1)
otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
} else {
+#ifdef CONFIG_DCB
+ if (flow->rule_type & PFC_FLOWCTRL_RULE)
+ otx2_update_bpid_in_rqctx(pfvf, 0,
+ flow->flow_spec.ring_cookie,
+ false);
+#endif
+
err = otx2_remove_flow_msg(pfvf, flow->entry, false);
}
@@ -1383,7 +1419,7 @@ void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
struct ethhdr *eth_hdr;
list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter) {
+ if (iter->rule_type & DMAC_FILTER_RULE) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
otx2_dmacflt_add(pf, eth_hdr->h_dest,
iter->entry);
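A note on the PFC detection added in this file: the rule's VLAN TCI carries the PCP (priority) field in its top three bits, and pfc_en is the dcbnl-style per-priority bitmap. A minimal, self-contained sketch of that test (demo_* name is illustrative, not the driver's):

#include <linux/bitops.h>
#include <linux/types.h>

/* VLAN TCI layout: PCP (3 bits) | DEI (1 bit) | VID (12 bits).
 * Shifting the host-order TCI right by 13 isolates the priority,
 * which is then looked up in the pfc_en bitmap; e.g. pfc_en = BIT(3)
 * means PFC is enabled for priority 3 only.
 */
static bool demo_tci_prio_has_pfc(u16 tci, u8 pfc_en)
{
	int prio = (tci >> 13) & 0x7;

	return pfc_en & BIT(prio);
}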
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index d39341e4ab37..441aafc26a08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1311,6 +1311,9 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
int total_size;
int rbuf_size;
+ if (pf->hw.rbuf_len)
+ return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
+
/* The data transferred by NIX to memory consists of actual packet
* plus additional data which has timestamp and/or EDSA/HIGIG2
* headers if interface is configured in corresponding modes.
@@ -1694,9 +1697,6 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
- /* Restore pause frame settings */
- otx2_config_pause_frm(pf);
-
/* Install DMAC Filters */
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
@@ -1863,9 +1863,7 @@ static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = features ^ netdev->features;
- bool ntuple = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *pf = netdev_priv(netdev);
- bool tc = !!(features & NETIF_F_HW_TC);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
@@ -1875,46 +1873,7 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
- if ((changed & NETIF_F_NTUPLE) && !ntuple)
- otx2_destroy_ntuple_flows(pf);
-
- if ((changed & NETIF_F_NTUPLE) && ntuple) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && tc) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable TC, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && !tc &&
- pf->flow_cfg && pf->flow_cfg->nr_flows) {
- netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
- return -EBUSY;
- }
-
- if ((changed & NETIF_F_NTUPLE) && ntuple &&
- (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
- netdev_err(netdev,
- "Can't enable NTUPLE when TC is active, disable TC and retry\n");
- return -EINVAL;
- }
-
- if ((changed & NETIF_F_HW_TC) && tc &&
- (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
- netdev_err(netdev,
- "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
- return -EINVAL;
- }
-
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static void otx2_reset_task(struct work_struct *work)
@@ -2625,6 +2584,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use 128-byte CQE descriptors by default */
+ hw->xqe_size = 128;
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
@@ -2778,9 +2740,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
- /* Enable pause frames by default */
- pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_pf_sriov_init;
+#endif
return 0;
@@ -2925,6 +2889,21 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
otx2_config_hw_rx_tstamp(pf, false);
+ /* Disable 802.3x pause frames */
+ if ((pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED) ||
+ (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(pf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (pf->pfc_en) {
+ pf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(pf);
+ }
+#endif
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
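Both the PF path above and the VF path later in this series now funnel set_features through otx2_handle_ntuple_tc_features(). Reconstructing that helper from the PF checks deleted above gives roughly the following (a sketch only; the real helper lives in otx2_common.c and may differ in detail):

static int demo_handle_ntuple_tc_features(struct net_device *netdev,
					  netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	struct otx2_nic *pfvf = netdev_priv(netdev);
	bool ntuple = !!(features & NETIF_F_NTUPLE);
	bool tc = !!(features & NETIF_F_HW_TC);

	/* Dropping NTUPLE tears down the installed flows */
	if ((changed & NETIF_F_NTUPLE) && !ntuple)
		otx2_destroy_ntuple_flows(pfvf);

	/* Either offload needs MCAM entries to have been allocated */
	if ((changed & NETIF_F_NTUPLE) && ntuple && !pfvf->flow_cfg->max_flows) {
		netdev_err(netdev, "Can't enable NTUPLE, MCAM entries not allocated\n");
		return -EINVAL;
	}
	if ((changed & NETIF_F_HW_TC) && tc && !pfvf->flow_cfg->max_flows) {
		netdev_err(netdev, "Can't enable TC, MCAM entries not allocated\n");
		return -EINVAL;
	}

	/* TC cannot be disabled while its flows are still installed */
	if ((changed & NETIF_F_HW_TC) && !tc && pfvf->flow_cfg->nr_flows) {
		netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
		return -EBUSY;
	}

	/* NTUPLE and TC are mutually exclusive offloads */
	if ((changed & NETIF_F_NTUPLE) && ntuple &&
	    (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
		netdev_err(netdev, "Can't enable NTUPLE when TC is active, disable TC and retry\n");
		return -EINVAL;
	}
	if ((changed & NETIF_F_HW_TC) && tc &&
	    (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
		netdev_err(netdev, "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
		return -EINVAL;
	}

	return 0;
}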
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 61c20907315f..fdc2c9315b91 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -294,6 +294,14 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
goto error;
}
+ if (is_dev_otx2(pfvf->pdev)) {
+ ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp;
+ } else {
+ ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ }
+
pfvf->ptp = ptp_ptr;
error:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
index 6ff284211d7b..7ff41927ceaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -8,6 +8,21 @@
#ifndef OTX2_PTP_H
#define OTX2_PTP_H
+static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp)
+{
+ return be64_to_cpu(*(__be64 *)&timestamp);
+}
+
+static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp)
+{
+ return timestamp;
+}
+
+static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp)
+{
+ return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL);
+}
+
int otx2_ptp_init(struct otx2_nic *pfvf);
void otx2_ptp_destroy(struct otx2_nic *pfvf);
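The two silicon families encode hardware timestamps differently: OcteonTx2 delivers a big-endian nanosecond value on Rx (hence the be64_to_cpu above), while CN10K packs seconds in the upper 32 bits and nanoseconds in the lower 32. Dispatching through the convert_rx/tx_ptp_tstmp pointers set up in otx2_ptp_init() keeps per-packet chip checks out of the hot path. A worked example of the CN10K conversion (values are made up for illustration):

#include <linux/time64.h>	/* NSEC_PER_SEC */
#include <linux/types.h>

/* CN10K layout: [63:32] = seconds, [31:0] = nanoseconds.
 * For ts = (5ULL << 32) | 250000000 (5 s + 0.25 s):
 *   (ts >> 32) * NSEC_PER_SEC = 5000000000
 * + (ts & 0xFFFFFFFFUL)       =  250000000
 *                             = 5250000000 ns total
 */
static u64 demo_cn10k_convert(u64 ts)
{
	return ((ts >> 32) * NSEC_PER_SEC) + (ts & 0xFFFFFFFFUL);
}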
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 626961a41089..28b19945d716 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -58,7 +58,7 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
- if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc))
+ if (!nic->flow_cfg->max_flows)
return 0;
/* Max flows changed, free the existing bitmap */
@@ -190,6 +190,40 @@ static int otx2_tc_validate_flow(struct otx2_nic *nic,
return 0;
}
+static int otx2_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct tc_cls_matchall_offload *cls)
{
@@ -212,6 +246,10 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
entry = &cls->rule->action.entries[0];
switch (entry->id) {
case FLOW_ACTION_POLICE:
+ err = otx2_policer_validate(&cls->rule->action, entry, extack);
+ if (err)
+ return err;
+
if (entry->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
return -EOPNOTSUPP;
@@ -315,6 +353,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
u8 nr_police = 0;
bool pps = false;
u64 rate;
+ int err;
int i;
if (!flow_action_has_entries(flow_action)) {
@@ -355,6 +394,10 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return -EOPNOTSUPP;
}
+ err = otx2_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
if (act->police.rate_bytes_ps > 0) {
rate = act->police.rate_bytes_ps * 8;
burst = act->police.burst;
@@ -1023,6 +1066,7 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_setup_tc);
static const struct rhashtable_params tc_flow_ht_params = {
.head_offset = offsetof(struct otx2_tc_flow, node),
@@ -1052,6 +1096,7 @@ int otx2_init_tc(struct otx2_nic *nic)
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
+EXPORT_SYMBOL(otx2_init_tc);
void otx2_shutdown_tc(struct otx2_nic *nic)
{
@@ -1060,3 +1105,4 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
kfree(tc->tc_entries_bitmap);
rhashtable_destroy(&tc->flow_table);
}
+EXPORT_SYMBOL(otx2_shutdown_tc);
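otx2_policer_validate() follows the validation pattern NIC drivers commonly apply before offloading a tc police action: only drop on exceed, only pipe/ok on conform, ok must be the last action, and no peakrate/avrate/overhead. A sketch of a caller wiring it in before programming a byte-rate policer (demo_* name is hypothetical):

static int demo_offload_police(struct otx2_nic *nic,
			       const struct flow_action *action,
			       const struct flow_action_entry *act,
			       struct netlink_ext_ack *extack)
{
	int err;

	/* Reject unsupported exceed/conform/peakrate combinations,
	 * exactly as otx2_policer_validate() above does.
	 */
	err = otx2_policer_validate(action, act, extack);
	if (err)
		return err;

	/* This sketch, like the egress matchall path, only handles
	 * byte rates, not packets per second.
	 */
	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack, "pps policing not supported here");
		return -EOPNOTSUPP;
	}

	return 0;	/* program act->police.rate_bytes_ps / burst here */
}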
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 7c4068c5d1ac..c26de15b2ac3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -148,6 +148,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
if (timestamp != 1) {
+ timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (!err) {
memset(&ts, 0, sizeof(ts));
@@ -167,14 +168,15 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
struct sk_buff *skb, void *data)
{
- u64 tsns;
+ u64 timestamp, tsns;
int err;
if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
return;
+ timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
/* The first 8 bytes is the timestamp */
- err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (err)
return;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 925b74ebb8b0..9e87836ed8bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -472,23 +472,7 @@ static void otx2vf_reset_task(struct work_struct *work)
static int otx2vf_set_features(struct net_device *netdev,
netdev_features_t features)
{
- netdev_features_t changed = features ^ netdev->features;
- bool ntuple_enabled = !!(features & NETIF_F_NTUPLE);
- struct otx2_nic *vf = netdev_priv(netdev);
-
- if (changed & NETIF_F_NTUPLE) {
- if (!ntuple_enabled) {
- otx2_mcam_flow_del(vf);
- return 0;
- }
-
- if (!otx2_get_maxflows(vf->flow_cfg)) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static const struct net_device_ops otx2vf_netdev_ops = {
@@ -502,6 +486,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_eth_ioctl = otx2_ioctl,
+ .ndo_setup_tc = otx2_setup_tc,
};
static int otx2_wq_init(struct otx2_nic *vf)
@@ -586,6 +571,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->max_queues = qcount;
hw->tot_tx_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use 128-byte CQE descriptors by default */
+ hw->xqe_size = 128;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -662,6 +650,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_TC;
netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
@@ -697,16 +686,24 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_unreg_netdev;
- err = otx2_register_dl(vf);
+ err = otx2_init_tc(vf);
if (err)
goto err_unreg_netdev;
- /* Enable pause frames by default */
- vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_shutdown_tc;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_shutdown_tc;
+#endif
return 0;
+err_shutdown_tc:
+ otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
err_ptp_destroy:
@@ -739,6 +736,22 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
+ /* Disable 802.3x pause frames */
+ if ((vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED) ||
+ (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(vf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (vf->pfc_en) {
+ vf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(vf);
+ }
+#endif
+
cancel_work_sync(&vf->reset_task);
otx2_unregister_dl(vf);
unregister_netdev(netdev);
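For orientation, the VF probe tail gains one more teardown level with this series: otx2_init_tc() now succeeds before otx2_register_dl() and the DCB setup, so later failures must unwind through err_shutdown_tc. Condensed from the hunks above into the usual goto-ladder shape (a sketch, not the full probe):

static int demo_vf_probe_tail(struct otx2_nic *vf, struct net_device *netdev)
{
	int err;

	err = otx2_init_tc(vf);
	if (err)
		goto err_unreg_netdev;

	err = otx2_register_dl(vf);
	if (err)
		goto err_shutdown_tc;

#ifdef CONFIG_DCB
	err = otx2_dcbnl_set_ops(netdev);
	if (err)
		goto err_shutdown_tc;
#endif
	return 0;

err_shutdown_tc:
	/* each label undoes everything initialised before it */
	otx2_shutdown_tc(vf);
err_unreg_netdev:
	unregister_netdev(netdev);
	return err;
}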