Diffstat (limited to 'drivers/net/ethernet/cavium')
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig                    13
-rw-r--r--  drivers/net/ethernet/cavium/Makefile                    1
-rw-r--r--  drivers/net/ethernet/cavium/common/Makefile             1
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c       353
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.h        70
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c    7
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h              36
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c         58
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h           1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c    31
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c      297
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c     41
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h      6
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c      31
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h       4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_xcv.c       2
16 files changed, 901 insertions(+), 51 deletions(-)
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 63be75eb34d2..043e3c11c42b 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -27,6 +27,7 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
+ imply CAVIUM_PTP
depends on 64BIT
---help---
This driver supports Thunder's NIC virtual function
@@ -50,6 +51,18 @@ config THUNDER_NIC_RGX
This driver supports configuring XCV block of RGX interface
present on CN81XX chip.
+config CAVIUM_PTP
+ tristate "Cavium PTP coprocessor as PTP clock"
+ depends on 64BIT
+ imply PTP_1588_CLOCK
+ default y
+ ---help---
+ This driver adds support for the Precision Time Protocol Clocks and
+ Timestamping coprocessor (PTP) found on Cavium processors.
+ PTP provides a timestamping mechanism that is suitable for use in IEEE 1588
+ Precision Time Protocol or for other purposes. Timestamps can be used in
+ the BGX, TNS, GTI, and NIC blocks.
+
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
index 872da9f7c31a..946bba84e81d 100644
--- a/drivers/net/ethernet/cavium/Makefile
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the Cavium ethernet device drivers.
#
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += common/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/
diff --git a/drivers/net/ethernet/cavium/common/Makefile b/drivers/net/ethernet/cavium/common/Makefile
new file mode 100644
index 000000000000..dd8561b8060b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CAVIUM_PTP) += cavium_ptp.o
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
new file mode 100644
index 000000000000..c87c9c684a33
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timecounter.h>
+#include <linux/pci.h>
+
+#include "cavium_ptp.h"
+
+#define DRV_NAME "Cavium PTP Driver"
+
+#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 ptp_cavium_clock_get(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ u64 ret = CLOCK_BASE_RATE * 16;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+struct cavium_ptp *cavium_ptp_get(void)
+{
+ struct cavium_ptp *ptp;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+EXPORT_SYMBOL(cavium_ptp_get);
+
+void cavium_ptp_put(struct cavium_ptp *ptp)
+{
+ if (!ptp)
+ return;
+ pci_dev_put(ptp->pdev);
+}
+EXPORT_SYMBOL(cavium_ptp_put);
+
+/**
+ * cavium_ptp_adjfine() - Adjust ptp frequency
+ * @ptp_info: PTP clock info
+ * @scaled_ppm: how much to adjust by, in parts per million, but with a
+ * 16 bit binary fractional field
+ */
+static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 comp;
+ u64 adj;
+ bool neg_adj = false;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. The typical convention is that it
+ * represents the number of nanoseconds between each cycle. In this
+ * convention the compensation value is a 64 bit fixed-point number
+ * where the upper 32 bits are the number of nanoseconds and the lower
+ * 32 bits are fractions of a nanosecond.
+ * The scaled_ppm is the requested adjustment in parts per million,
+ * shifted left by 16 bits, so scaled_ppm / (1M * 2^16) is the ratio by
+ * which the compensation value should be corrected.
+ * To calculate the new compensation value we use 64 bit fixed-point
+ * arithmetic on the following formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the basic compensation value calculated initially
+ * in cavium_ptp_probe() -> tbase = 1/Hz. Then we use an endian
+ * independent structure definition to write the data to the PTP register.
+ */
+ comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ adj = comp * scaled_ppm;
+ adj >>= 16;
+ adj = div_u64(adj, 1000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
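
As a sanity check of the fixed-point arithmetic above, the same computation can be reproduced in a small stand-alone program. This is only an illustrative sketch; the 800 MHz coprocessor rate and the +1 ppm adjustment are made-up example inputs, not values read from the hardware:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock_rate = 800000000ULL;	/* example coprocessor rate, Hz */
	int64_t scaled_ppm = 1 << 16;		/* +1 ppm, in ppm << 16 form */
	int neg_adj = scaled_ppm < 0;
	uint64_t sp = neg_adj ? -scaled_ppm : scaled_ppm;

	/* tbase = 1e9/Hz in 32.32 fixed point, as written to PTP_CLOCK_COMP */
	uint64_t comp = (1000000000ULL << 32) / clock_rate;
	uint64_t adj = (comp * sp) >> 16;	/* drop the 16-bit ppm fraction */

	adj /= 1000000ULL;			/* parts per million -> ratio */
	comp = neg_adj ? comp - adj : comp + adj;
	printf("PTP_CLOCK_COMP = 0x%016" PRIx64 "\n", comp);
	return 0;
}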
+
+/**
+ * cavium_ptp_adjtime() - Adjust ptp time
+ * @ptp_info: PTP clock info
+ * @delta: how much to adjust by, in nanosecs
+ */
+static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_adjtime(&clock->time_counter, delta);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ /* Sync, for network driver to get latest value */
+ smp_mb();
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_gettime() - Get hardware clock time with adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ nsec = timecounter_read(&clock->time_counter);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
+ * @ptp: PTP clock info
+ * @rq: request
+ * @on: is it on
+ */
+static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct cavium_ptp *clock =
+ container_of(cc, struct cavium_ptp, cycle_counter);
+
+ return readq(clock->reg_base + PTP_CLOCK_HI);
+}
+
+static int cavium_ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct cavium_ptp *clock;
+ struct cyclecounter *cc;
+ u64 clock_cfg;
+ u64 clock_comp;
+ int err;
+
+ clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ clock->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ spin_lock_init(&clock->spin_lock);
+
+ cc = &clock->cycle_counter;
+ cc->read = cavium_ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&clock->time_counter, &clock->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ clock->clock_rate = ptp_cavium_clock_get();
+
+ clock->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "ThunderX PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = cavium_ptp_adjfine,
+ .adjtime = cavium_ptp_adjtime,
+ .gettime64 = cavium_ptp_gettime,
+ .settime64 = cavium_ptp_settime,
+ .enable = cavium_ptp_enable,
+ };
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
+
+ clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
+ if (!clock->ptp_clock) {
+ err = -ENODEV;
+ goto error_stop;
+ }
+ if (IS_ERR(clock->ptp_clock)) {
+ err = PTR_ERR(clock->ptp_clock);
+ goto error_stop;
+ }
+
+ pci_set_drvdata(pdev, clock);
+ return 0;
+
+error_stop:
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+ pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+
+error_free:
+ devm_kfree(dev, clock);
+
+error:
+ /* For `cavium_ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void cavium_ptp_remove(struct pci_dev *pdev)
+{
+ struct cavium_ptp *clock = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ ptp_clock_unregister(clock->ptp_clock);
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id cavium_ptp_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+ { 0, }
+};
+
+static struct pci_driver cavium_ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = cavium_ptp_id_table,
+ .probe = cavium_ptp_probe,
+ .remove = cavium_ptp_remove,
+};
+
+static int __init cavium_ptp_init_module(void)
+{
+ return pci_register_driver(&cavium_ptp_driver);
+}
+
+static void __exit cavium_ptp_cleanup_module(void)
+{
+ pci_unregister_driver(&cavium_ptp_driver);
+}
+
+module_init(cavium_ptp_init_module);
+module_exit(cavium_ptp_cleanup_module);
+
+MODULE_DESCRIPTION(DRV_NAME);
+MODULE_AUTHOR("Cavium Networks <support@cavium.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
new file mode 100644
index 000000000000..be2bafc7beeb
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#ifndef CAVIUM_PTP_H
+#define CAVIUM_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+struct cavium_ptp {
+ struct pci_dev *pdev;
+
+ /* Serialize access to cycle_counter, time_counter and hw_registers */
+ spinlock_t spin_lock;
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+ void __iomem *reg_base;
+
+ u32 clock_rate;
+
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+};
+
+#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+
+struct cavium_ptp *cavium_ptp_get(void);
+void cavium_ptp_put(struct cavium_ptp *ptp);
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ unsigned long flags;
+ u64 ret;
+
+ spin_lock_irqsave(&ptp->spin_lock, flags);
+ ret = timecounter_cyc2time(&ptp->time_counter, tstamp);
+ spin_unlock_irqrestore(&ptp->spin_lock, flags);
+
+ return ret;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return ptp_clock_index(clock->ptp_clock);
+}
+
+#else
+
+static inline struct cavium_ptp *cavium_ptp_get(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void cavium_ptp_put(struct cavium_ptp *ptp) {}
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ return 0;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return -1;
+}
+
+#endif
+
+#endif
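
To illustrate how a consumer driver is expected to use this header, here is a rough sketch. It mirrors what the nicvf changes further down in this patch do, but the my_* functions and struct my_priv are hypothetical names invented for the example:

#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#include "cavium_ptp.h"

struct my_priv {				/* hypothetical private state */
	struct cavium_ptp *ptp_clock;
};

static int my_probe(struct my_priv *priv)
{
	struct cavium_ptp *ptp = cavium_ptp_get();

	if (IS_ERR(ptp)) {
		if (PTR_ERR(ptp) == -ENODEV)
			ptp = NULL;	/* no PTP device, run without timestamps */
		else
			return PTR_ERR(ptp);	/* e.g. -EPROBE_DEFER */
	}
	priv->ptp_clock = ptp;
	return 0;
}

/* Convert a raw hardware timestamp on the receive path */
static void my_rx_tstamp(struct my_priv *priv, struct sk_buff *skb, u64 raw)
{
	u64 ns;

	if (!priv->ptp_clock)
		return;
	ns = cavium_ptp_tstamp2time(priv->ptp_clock, raw);
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
}

static void my_remove(struct my_priv *priv)
{
	if (priv->ptp_clock)
		cavium_ptp_put(priv->ptp_clock);
}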
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 2c615ab09e64..f38abf626412 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -702,12 +702,10 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
size = octdevsize + priv_size + configsize +
(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
- buf = vmalloc(size);
+ buf = vzalloc(size);
if (!buf)
return NULL;
- memset(buf, 0, size);
-
oct = (struct octeon_device *)buf;
oct->priv = (void *)(buf + octdevsize);
oct->chip = (void *)(buf + octdevsize + priv_size);
@@ -840,10 +838,9 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
size = sizeof(struct octeon_ioq_vector) * num_ioqs;
- oct->ioq_vector = vmalloc(size);
+ oct->ioq_vector = vzalloc(size);
if (!oct->ioq_vector)
return 1;
- memset(oct->ioq_vector, 0, size);
for (i = 0; i < num_ioqs; i++) {
ioq_vector = &oct->ioq_vector[i];
ioq_vector->oct_dev = oct;
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 4a02e618e318..4cacce5d2b16 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -263,6 +263,8 @@ struct nicvf_drv_stats {
struct u64_stats_sync syncp;
};
+struct cavium_ptp;
+
struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev;
@@ -312,6 +314,33 @@ struct nicvf {
struct tasklet_struct qs_err_task;
struct work_struct reset_task;
+ /* PTP timestamp */
+ struct cavium_ptp *ptp_clock;
+ /* Inbound timestamping is on */
+ bool hw_rx_tstamp;
+ /* When a packet that requires timestamping is sent, the hardware
+ * inserts two entries into the completion queue. The first is the
+ * regular CQE_TYPE_SEND entry that signals that the packet was sent.
+ * The second is a CQE_TYPE_SEND_PTP entry that contains the actual
+ * timestamp for that packet.
+ * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND
+ * entry and is used and zeroed in the handler for the CQE_TYPE_SEND_PTP
+ * entry.
+ * So `ptp_skb` holds the pointer to the packet between the calls to
+ * the CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers.
+ */
+ struct sk_buff *ptp_skb;
+ /* `tx_ptp_skbs` is set when the hardware is sending a packet that
+ * requires timestamping. Cavium hardware cannot process more than one
+ * such packet at once, so this is set each time the driver submits
+ * a packet that requires timestamping to the send queue, and cleared
+ * each time the completion queue entry reporting that the packet was
+ * sent is received.
+ * So `tx_ptp_skbs` prevents the driver from submitting more than one
+ * packet that requires timestamping to the hardware at a time.
+ */
+ atomic_t tx_ptp_skbs;
+
/* Interrupt coalescing settings */
u32 cq_coalesce_usecs;
u32 msg_enable;
@@ -371,6 +400,7 @@ struct nicvf {
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
#define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */
+#define NIC_MBOX_MSG_PTP_CFG 0x19 /* HW packet timestamp */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
@@ -521,6 +551,11 @@ struct pfc {
u8 fc_tx;
};
+struct set_ptp {
+ u8 msg;
+ bool enable;
+};
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -540,6 +575,7 @@ union nic_mbx {
struct set_loopback lbk;
struct reset_stat_cfg reset_stat;
struct pfc pfc;
+ struct set_ptp ptp;
};
#define NIC_NODE_ID_MASK 0x03
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 8f1dd55b3e08..7ff66a8194e2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -18,7 +18,7 @@
#include "q_struct.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-nic"
+#define DRV_NAME "nicpf"
#define DRV_VERSION "1.0"
struct hw_info {
@@ -426,13 +426,22 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable backpressure */
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
- /* TNS and TNS bypass modes are present only on 88xx */
+ /* TNS and TNS bypass modes are present only on 88xx.
+ * Also, the offset of this CSR changed on 81xx and 83xx.
+ */
if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
/* Disable TNS mode on both interfaces */
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
- (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX0_BLOCK | (1ULL << 16));
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
- (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX1_BLOCK | (1ULL << 16));
+ } else {
+ /* Configure timestamp generation timeout to 10us */
+ for (i = 0; i < nic->hw->bgx_cnt; i++)
+ nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3),
+ (1ULL << 16));
}
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
@@ -880,6 +889,44 @@ static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
}
}
+/* Enable or disable HW timestamping by BGX for pkts received on a LMAC */
+static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
+{
+ struct pkind_cfg *pkind;
+ u8 lmac, bgx_idx;
+ u64 pkind_val, pkind_idx;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX;
+ pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3));
+ pkind = (struct pkind_cfg *)&pkind_val;
+
+ if (ptp->enable && !pkind->hdr_sl) {
+ /* Skip length to exclude the 8-byte timestamp while parsing the pkt.
+ * If not configured, this will result in L2 errors.
+ */
+ pkind->hdr_sl = 4;
+ /* Adjust max packet length allowed */
+ pkind->maxlen += (pkind->hdr_sl * 2);
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588);
+ } else if (!ptp->enable && pkind->hdr_sl) {
+ pkind->maxlen -= (pkind->hdr_sl * 2);
+ pkind->hdr_sl = 0;
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q);
+ }
+
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -1022,6 +1069,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_PFC:
nic_pause_frame(nic, vf, &mbx.pfc);
goto unlock;
+ case NIC_MBOX_MSG_PTP_CFG:
+ nic_config_timestamp(nic, vf, &mbx.ptp);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index 80d46337cf29..a16c48a1ebb2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -99,6 +99,7 @@
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_INTFX_SEND_CFG (0x4000)
#define NIC_PF_MCAM_0_191_ENA (0x100000)
#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
#define NIC_PF_MCAM_CTRL (0x120000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index b9ece9cbf98b..5603f5ab1fee 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -9,14 +9,16 @@
/* ETHTOOL Support for VNIC_VF Device*/
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
-#define DRV_NAME "thunder-nicvf"
+#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
struct nicvf_stat {
@@ -824,6 +826,31 @@ static int nicvf_set_pauseparam(struct net_device *dev,
return 0;
}
+static int nicvf_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static const struct ethtool_ops nicvf_ethtool_ops = {
.get_link = nicvf_get_link,
.get_drvinfo = nicvf_get_drvinfo,
@@ -847,7 +874,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
.set_pauseparam = nicvf_set_pauseparam,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = nicvf_get_ts_info,
.get_link_ksettings = nicvf_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a063c36c4c58..b68cde9f17d2 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -20,13 +20,15 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
-#define DRV_NAME "thunder-nicvf"
+#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
/* Supported devices */
@@ -65,6 +67,11 @@ module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+struct nicvf_xdp_tx {
+ u64 dma_addr;
+ u8 qidx;
+};
+
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
if (nic->sqs_mode)
@@ -500,14 +507,29 @@ static int nicvf_init_resources(struct nicvf *nic)
return 0;
}
+static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
+{
+ /* Check if it's a recycled page; if not, unmap the DMA mapping.
+ * A recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+}
+
static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
- struct sk_buff **skb)
+ struct rcv_queue *rq, struct sk_buff **skb)
{
struct xdp_buff xdp;
struct page *page;
+ struct nicvf_xdp_tx *xdp_tx = NULL;
u32 action;
- u16 len, offset = 0;
+ u16 len, err, offset = 0;
u64 dma_addr, cpu_addr;
void *orig_data;
@@ -521,10 +543,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
cpu_addr = (u64)phys_to_virt(cpu_addr);
page = virt_to_page((void *)cpu_addr);
- xdp.data_hard_start = page_address(page);
+ xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
xdp.data = (void *)cpu_addr;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
rcu_read_lock();
@@ -540,18 +563,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
switch (action) {
case XDP_PASS:
- /* Check if it's a recycled page, if not
- * unmap the DMA mapping.
- *
- * Recycled page holds an extra reference.
- */
- if (page_ref_count(page) == 1) {
- dma_addr &= PAGE_MASK;
- dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
- RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
+ nicvf_unmap_page(nic, page, dma_addr);
/* Build SKB and pass on packet to network stack */
*skb = build_skb(xdp.data,
@@ -564,6 +576,20 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
case XDP_TX:
nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
return true;
+ case XDP_REDIRECT:
+ /* Save DMA address for use while transmitting */
+ xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+ xdp_tx->dma_addr = dma_addr;
+ xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+ err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
+ if (!err)
+ return true;
+
+ /* Free the page on error */
+ nicvf_unmap_page(nic, page, dma_addr);
+ put_page(page);
+ break;
default:
bpf_warn_invalid_xdp_action(action);
/* fall through */
@@ -571,24 +597,51 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
trace_xdp_exception(nic->netdev, prog, action);
/* fall through */
case XDP_DROP:
- /* Check if it's a recycled page, if not
- * unmap the DMA mapping.
- *
- * Recycled page holds an extra reference.
- */
- if (page_ref_count(page) == 1) {
- dma_addr &= PAGE_MASK;
- dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
- RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
+ nicvf_unmap_page(nic, page, dma_addr);
put_page(page);
return true;
}
return false;
}
+static void nicvf_snd_ptp_handler(struct net_device *netdev,
+ struct cqe_send_t *cqe_tx)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct skb_shared_hwtstamps ts;
+ u64 ns;
+
+ nic = nic->pnicvf;
+
+ /* Sync for 'ptp_skb' */
+ smp_rmb();
+
+ /* New timestamp request can be queued now */
+ atomic_set(&nic->tx_ptp_skbs, 0);
+
+ /* Check for timestamp requested skb */
+ if (!nic->ptp_skb)
+ return;
+
+ /* Check if timestamping timed out; the timeout is configured to 10us */
+ if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
+ cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
+ goto no_tstamp;
+
+ /* Get the timestamp */
+ memset(&ts, 0, sizeof(ts));
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
+ ts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(nic->ptp_skb, &ts);
+
+no_tstamp:
+ /* Free the original skb */
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ /* Sync 'ptp_skb' */
+ smp_wmb();
+}
+
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cqe_send_t *cqe_tx,
int budget, int *subdesc_cnt,
@@ -645,7 +698,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
prefetch(skb);
(*tx_pkts)++;
*tx_bytes += skb->len;
- napi_consume_skb(skb, budget);
+ /* If timestamp is requested for this skb, don't free it */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ !nic->pnicvf->ptp_skb)
+ nic->pnicvf->ptp_skb = skb;
+ else
+ napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
/* In case of SW TSO on 88xx, only last segment will have
@@ -684,9 +742,25 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
skb_set_hash(skb, hash, hash_type);
}
+static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
+{
+ u64 ns;
+
+ if (!nic->ptp_clock || !nic->hw_rx_tstamp)
+ return;
+
+ /* The first 8 bytes are the timestamp */
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock,
+ be64_to_cpu(*(__be64 *)skb->data));
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+ __skb_pull(skb, 8);
+}
+
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
+ struct cqe_rx_t *cqe_rx,
+ struct snd_queue *sq, struct rcv_queue *rq)
{
struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
@@ -712,7 +786,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
/* For XDP, ignore pkts spanning multiple pages */
if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
/* Packet consumed by XDP */
- if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
+ if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
return;
} else {
skb = nicvf_get_rcv_skb(snic, cqe_rx,
@@ -734,6 +808,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
}
+ nicvf_set_rxtstamp(nic, skb);
nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
@@ -769,6 +844,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
struct snd_queue *sq = &qs->sq[cq_idx];
+ struct rcv_queue *rq = &qs->rq[cq_idx];
unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
spin_lock_bh(&cq->lock);
@@ -799,7 +875,7 @@ loop:
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
work_done++;
break;
case CQE_TYPE_SEND:
@@ -808,10 +884,12 @@ loop:
&tx_pkts, &tx_bytes);
tx_done++;
break;
+ case CQE_TYPE_SEND_PTP:
+ nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
+ break;
case CQE_TYPE_INVALID:
case CQE_TYPE_RX_SPLIT:
case CQE_TYPE_RX_TCP:
- case CQE_TYPE_SEND_PTP:
/* Ignore for now */
break;
}
@@ -1307,12 +1385,28 @@ int nicvf_stop(struct net_device *netdev)
nicvf_free_cq_poll(nic);
+ /* Free any pending SKB saved while waiting for its Tx timestamp */
+ if (nic->ptp_skb) {
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ }
+
/* Clear multiqset info */
nic->pnicvf = nic;
return 0;
}
+static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+
+ mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
+ mbx.ptp.enable = enable;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
union nic_mbx mbx = {};
@@ -1382,6 +1476,12 @@ int nicvf_open(struct net_device *netdev)
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
+ /* Configure PTP timestamp */
+ if (nic->ptp_clock)
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+ atomic_set(&nic->tx_ptp_skbs, 0);
+ nic->ptp_skb = NULL;
+
/* Configure receive side scaling and MTU */
if (!nic->sqs_mode) {
nicvf_rss_init(nic);
@@ -1764,6 +1864,117 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
}
}
+static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf *snic = nic;
+ struct nicvf_xdp_tx *xdp_tx;
+ struct snd_queue *sq;
+ struct page *page;
+ int err, qidx;
+
+ if (!netif_running(netdev) || !nic->xdp_prog)
+ return -EINVAL;
+
+ page = virt_to_page(xdp->data);
+ xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+ qidx = xdp_tx->qidx;
+
+ if (xdp_tx->qidx >= nic->xdp_tx_queues)
+ return -EINVAL;
+
+ /* Get secondary Qset's info */
+ if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
+ qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
+ snic = (struct nicvf *)nic->snicvf[qidx - 1];
+ if (!snic)
+ return -EINVAL;
+ qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
+ }
+
+ sq = &snic->qs->sq[qidx];
+ err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
+ xdp_tx->dma_addr,
+ xdp->data_end - xdp->data);
+ if (err)
+ return -ENOMEM;
+
+ nicvf_xdp_sq_doorbell(snic, sq, qidx);
+ return 0;
+}
+
+static void nicvf_xdp_flush(struct net_device *dev)
+{
+ return;
+}
+
+static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ nic->hw_rx_tstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ nic->hw_rx_tstamp = true;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (netif_running(netdev))
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return nicvf_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
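
For reference, the SIOCSHWTSTAMP path added above can be exercised from user space with the standard hwtstamp_config ioctl. A minimal sketch, where "eth0" is only an example interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr)) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}
	/* the driver may rewrite rx_filter; here it reports HWTSTAMP_FILTER_ALL */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}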
+
static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open,
.ndo_stop = nicvf_stop,
@@ -1775,6 +1986,9 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
+ .ndo_xdp_xmit = nicvf_xdp_xmit,
+ .ndo_xdp_flush = nicvf_xdp_flush,
+ .ndo_do_ioctl = nicvf_ioctl,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1784,6 +1998,16 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct nicvf *nic;
int err, qcount;
u16 sdevid;
+ struct cavium_ptp *ptp_clock;
+
+ ptp_clock = cavium_ptp_get();
+ if (IS_ERR(ptp_clock)) {
+ if (PTR_ERR(ptp_clock) == -ENODEV)
+ /* In a virtualized environment we proceed without PTP */
+ ptp_clock = NULL;
+ else
+ return PTR_ERR(ptp_clock);
+ }
err = pci_enable_device(pdev);
if (err) {
@@ -1833,6 +2057,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->pdev = pdev;
nic->pnicvf = nic;
nic->max_queues = qcount;
+ /* If the number of CPUs is too low, there won't be any queues left
+ * for XDP_TX, hence double it.
+ */
+ if (!nic->t88)
+ nic->max_queues *= 2;
+ nic->ptp_clock = ptp_clock;
/* MAP VF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -1946,6 +2176,7 @@ static void nicvf_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
if (nic->drv_stats)
free_percpu(nic->drv_stats);
+ cavium_ptp_put(nic->ptp_clock);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a3d12dbde95b..3eae9ff9b53a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
/* Reserve space for header modifications by BPF program */
if (rbdr->is_xdp)
- buf_len += XDP_PACKET_HEADROOM;
+ buf_len += XDP_HEADROOM;
/* Check if it's recycled */
if (pgcache)
@@ -224,8 +224,9 @@ ret:
nic->rb_page = NULL;
return -ENOMEM;
}
+
if (pgcache)
- pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
+ pgcache->dma_addr = *rbuf + XDP_HEADROOM;
nic->rb_page_offset += buf_len;
}
@@ -759,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
if (!rq->enable) {
nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
return;
}
@@ -771,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
/* all writes of RBDR data to be loaded into L2 Cache as well*/
rq->caching = 1;
+ /* Driver has no proper error path for a failed XDP RX-queue info reg */
+ WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
+
/* Send a mailbox msg to PF to config RQ */
mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
mbx.rq.qs_num = qs->vnic_id;
@@ -977,6 +982,9 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
qs_cfg->be = 1;
#endif
qs_cfg->vnic = qs->vnic_id;
+ /* Enable Tx timestamping capability */
+ if (nic->ptp_clock)
+ qs_cfg->send_tstmp_ena = 1;
}
nicvf_send_msg_to_pf(nic, &mbx);
}
@@ -1236,7 +1244,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
int qentry;
if (subdesc_cnt > sq->xdp_free_cnt)
- return 0;
+ return -1;
qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
@@ -1247,7 +1255,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
sq->xdp_desc_cnt += subdesc_cnt;
- return 1;
+ return 0;
}
/* Calculate no of SQ subdescriptors needed to transmit all
@@ -1384,6 +1392,29 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
+
+ /* Check if timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_tx_timestamp(skb);
+ return;
+ }
+
+ /* Tx timestamping not supported along with TSO, so ignore request */
+ if (skb_shinfo(skb)->gso_size)
+ return;
+
+ /* HW supports only a single outstanding packet to timestamp */
+ if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
+ return;
+
+ /* Mark the SKB for later reference */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Finally enable timestamp generation
+ * Since 'post_cqe' is also set, two CQEs will be posted
+ * for this packet i.e CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
+ */
+ hdr->tstmp = 1;
}
/* SQ GATHER subdescriptor
@@ -1625,7 +1656,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
if (page_ref_count(page) != 1)
return;
- len += XDP_PACKET_HEADROOM;
+ len += XDP_HEADROOM;
/* Receive buffers in XDP mode are mapped from page start */
dma_addr &= PAGE_MASK;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 67d1a3230773..7d1e4e2aaad0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,6 +11,8 @@
#include <linux/netdevice.h>
#include <linux/iommu.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
#include "q_struct.h"
#define MAX_QUEUE_SET 128
@@ -92,6 +94,9 @@
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */
+#define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
+
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
@@ -251,6 +256,7 @@ struct rcv_queue {
u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
u8 caching;
struct rx_tx_queue_stats stats;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct cmp_queue {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5e5c4d7796b8..91d34ea40e2c 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -21,7 +21,7 @@
#include "nic.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-BGX"
+#define DRV_NAME "thunder_bgx"
#define DRV_VERSION "1.0"
struct lmac {
@@ -245,6 +245,35 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+/* Enables or disables timestamp insertion by BGX for Rx packets */
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac;
+ u64 csr_offset, cfg;
+
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+
+ if (lmac->lmac_type == BGX_MODE_SGMII ||
+ lmac->lmac_type == BGX_MODE_QSGMII ||
+ lmac->lmac_type == BGX_MODE_RGMII)
+ csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
+ else
+ csr_offset = BGX_SMUX_RX_FRM_CTL;
+
+ cfg = bgx_reg_read(bgx, lmacid, csr_offset);
+
+ if (enable)
+ cfg |= BGX_PKT_RX_PTP_EN;
+ else
+ cfg &= ~BGX_PKT_RX_PTP_EN;
+ bgx_reg_write(bgx, lmacid, csr_offset, cfg);
+}
+EXPORT_SYMBOL(bgx_config_timestamping);
+
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 23acdc5ab896..5a7567d31138 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -122,6 +122,8 @@
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_FRM_CTL 0x20020
+#define BGX_PKT_RX_PTP_EN BIT_ULL(12)
#define BGX_SMUX_RX_JABBER 0x20030
#define BGX_SMUX_RX_CTL 0x20048
#define SMU_RX_CTL_STATUS (3ull << 0)
@@ -172,6 +174,7 @@
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12)
#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13)
+#define BGX_GMP_GMI_RXX_FRM_CTL 0x38028
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
@@ -223,6 +226,7 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 578c7f8f11bf..2d5e8dab1f70 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -20,7 +20,7 @@
#include "nic.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-xcv"
+#define DRV_NAME "thunder_xcv"
#define DRV_VERSION "1.0"
/* Register offsets */