Diffstat (limited to 'drivers')
-rw-r--r--  drivers/android/binder.c | 44
-rw-r--r--  drivers/base/cacheinfo.c | 13
-rw-r--r--  drivers/bluetooth/Kconfig | 4
-rw-r--r--  drivers/bluetooth/bpa10x.c | 2
-rw-r--r--  drivers/bluetooth/btbcm.h | 2
-rw-r--r--  drivers/bluetooth/btintel.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 22
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 239
-rw-r--r--  drivers/bus/sunxi-rsb.c | 1
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 1
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 1
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 85
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 89
-rw-r--r--  drivers/crypto/n2_core.c | 3
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 45
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 3
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 4
-rw-r--r--  drivers/gpio/gpio-tegra.c | 4
-rw-r--r--  drivers/gpio/gpio-xgene-sb.c | 2
-rw-r--r--  drivers/gpio/gpiolib.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 47
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c | 46
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 2
-rw-r--r--  drivers/hid/hid-core.c | 2
-rw-r--r--  drivers/hid/hid-cp2112.c | 15
-rw-r--r--  drivers/hid/hid-holtekff.c | 8
-rw-r--r--  drivers/hv/vmbus_drv.c | 2
-rw-r--r--  drivers/infiniband/core/core_priv.h | 2
-rw-r--r--  drivers/infiniband/core/device.c | 18
-rw-r--r--  drivers/infiniband/core/nldev.c | 54
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 25
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 5
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 5
-rw-r--r--  drivers/input/joystick/analog.c | 2
-rw-r--r--  drivers/input/misc/ims-pcu.c | 2
-rw-r--r--  drivers/input/misc/xen-kbdfront.c | 2
-rw-r--r--  drivers/input/mouse/elantech.c | 2
-rw-r--r--  drivers/input/touchscreen/elants_i2c.c | 10
-rw-r--r--  drivers/input/touchscreen/hideep.c | 3
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 17
-rw-r--r--  drivers/iommu/intel_irq_remapping.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 4
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c | 6
-rw-r--r--  drivers/leds/led-core.c | 3
-rw-r--r--  drivers/mfd/arizona-irq.c | 4
-rw-r--r--  drivers/mfd/rtsx_pcr.c | 3
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 1
-rw-r--r--  drivers/net/can/flexcan.c | 2
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 1
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 2
-rw-r--r--  drivers/net/can/vxcan.c | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 9
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 90
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 45
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 50
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 34
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c | 32
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 153
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 218
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 123
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 164
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 27
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 23
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 42
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 68
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 232
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 59
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 58
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 10
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 41
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 41
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 29
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 28
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 196
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 48
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 112
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 233
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 39
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 341
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 280
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 184
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 362
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 6
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 17
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 10
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 36
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h | 23
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | 17
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 309
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 44
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 33
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 43
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 158
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/socionext/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/socionext/Makefile | 1
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 1777
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r--  drivers/net/geneve.c | 14
-rw-r--r--  drivers/net/macsec.c | 72
-rw-r--r--  drivers/net/macvlan.c | 7
-rw-r--r--  drivers/net/phy/at803x.c | 2
-rw-r--r--  drivers/net/phy/marvell.c | 170
-rw-r--r--  drivers/net/phy/mdio-bcm-unimac.c | 2
-rw-r--r--  drivers/net/phy/mdio-sun4i.c | 6
-rw-r--r--  drivers/net/phy/phy-core.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 6
-rw-r--r--  drivers/net/phy/phylink.c | 5
-rw-r--r--  drivers/net/phy/sfp-bus.c | 6
-rw-r--r--  drivers/net/tap.c | 41
-rw-r--r--  drivers/net/tun.c | 239
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 23
-rw-r--r--  drivers/net/xen-netfront.c | 1
-rw-r--r--  drivers/nvmem/meson-mx-efuse.c | 4
-rw-r--r--  drivers/parisc/dino.c | 10
-rw-r--r--  drivers/parisc/eisa_eeprom.c | 2
-rw-r--r--  drivers/pci/host/pci-hyperv.c | 8
-rw-r--r--  drivers/phy/motorola/phy-cpcap-usb.c | 2
-rw-r--r--  drivers/phy/renesas/Kconfig | 2
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-typec.c | 2
-rw-r--r--  drivers/phy/tegra/xusb.c | 58
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 5
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32.c | 2
-rw-r--r--  drivers/platform/x86/wmi.c | 2
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 10
-rw-r--r--  drivers/s390/char/Makefile | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 5
-rw-r--r--  drivers/scsi/storvsc_drv.c | 3
-rw-r--r--  drivers/staging/android/ion/Kconfig | 2
-rw-r--r--  drivers/staging/android/ion/ion.c | 4
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c | 15
-rw-r--r--  drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c | 23
-rw-r--r--  drivers/thunderbolt/nhi.c | 2
-rw-r--r--  drivers/tty/n_tty.c | 4
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_msm.c | 2
-rw-r--r--  drivers/usb/core/config.c | 2
-rw-r--r--  drivers/usb/core/quirks.c | 6
-rw-r--r--  drivers/usb/host/xhci-debugfs.c | 16
-rw-r--r--  drivers/usb/host/xhci-pci.c | 3
-rw-r--r--  drivers/usb/host/xhci.c | 6
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 6
-rw-r--r--  drivers/usb/serial/option.c | 17
-rw-r--r--  drivers/usb/serial/qcserial.c | 3
-rw-r--r--  drivers/usb/usbip/stub_dev.c | 3
-rw-r--r--  drivers/usb/usbip/stub_main.c | 5
-rw-r--r--  drivers/usb/usbip/stub_rx.c | 18
-rw-r--r--  drivers/usb/usbip/stub_tx.c | 6
-rw-r--r--  drivers/usb/usbip/usbip_common.c | 16
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 12
-rw-r--r--  drivers/usb/usbip/vhci_rx.c | 23
-rw-r--r--  drivers/usb/usbip/vhci_tx.c | 3
-rw-r--r--  drivers/vhost/net.c | 67
-rw-r--r--  drivers/xen/pvcalls-front.c | 2
230 files changed, 6671 insertions(+), 1990 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bccec9de0533..a7ecfde66b7b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -482,7 +482,8 @@ enum binder_deferred_state {
* @tsk task_struct for group_leader of process
* (invariant after initialized)
* @files files_struct for process
- * (invariant after initialized)
+ * (protected by @files_lock)
+ * @files_lock mutex to protect @files
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -530,6 +531,7 @@ struct binder_proc {
int pid;
struct task_struct *tsk;
struct files_struct *files;
+ struct mutex files_lock;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
@@ -877,20 +879,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
- struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
+ int ret;
- if (files == NULL)
- return -ESRCH;
-
- if (!lock_task_sighand(proc->tsk, &irqs))
- return -EMFILE;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ ret = -ESRCH;
+ goto err;
+ }
+ if (!lock_task_sighand(proc->tsk, &irqs)) {
+ ret = -EMFILE;
+ goto err;
+ }
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- return __alloc_fd(files, 0, rlim_cur, flags);
+ ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
+err:
+ mutex_unlock(&proc->files_lock);
+ return ret;
}
/*
@@ -899,8 +907,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
+ mutex_lock(&proc->files_lock);
if (proc->files)
__fd_install(proc->files, fd, file);
+ mutex_unlock(&proc->files_lock);
}
/*
@@ -910,9 +920,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
int retval;
- if (proc->files == NULL)
- return -ESRCH;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ retval = -ESRCH;
+ goto err;
+ }
retval = __close_fd(proc->files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
@@ -920,7 +932,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
-
+err:
+ mutex_unlock(&proc->files_lock);
return retval;
}
@@ -4627,7 +4640,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
if (ret)
return ret;
+ mutex_lock(&proc->files_lock);
proc->files = get_files_struct(current);
+ mutex_unlock(&proc->files_lock);
return 0;
err_bad_arg:
@@ -4651,6 +4666,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
+ mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4903,9 +4919,11 @@ static void binder_deferred_func(struct work_struct *work)
files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES) {
+ mutex_lock(&proc->files_lock);
files = proc->files;
if (files)
proc->files = NULL;
+ mutex_unlock(&proc->files_lock);
}
if (defer & BINDER_DEFERRED_FLUSH)
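Taken together, the binder hunks reduce to one locking idiom: every read or write of proc->files, including the NULL check, happens under the same mutex that guards clearing it, so "test" and "use" can no longer race with BINDER_DEFERRED_PUT_FILES. A minimal sketch of the idiom, using made-up names (struct proc_ctx, proc_install_fd) rather than the real binder internals:

        struct proc_ctx {
                struct files_struct *files;     /* protected by files_lock */
                struct mutex files_lock;
        };

        static int proc_install_fd(struct proc_ctx *p, unsigned int fd,
                                   struct file *file)
        {
                int ret = -ESRCH;

                mutex_lock(&p->files_lock);
                if (p->files) {                 /* may be cleared concurrently */
                        __fd_install(p->files, fd, file);
                        ret = 0;
                }
                mutex_unlock(&p->files_lock);
                return ret;
        }

The -ERESTARTSYS handling in task_close_fd() is independent of the lock: a close() cannot be restarted once the file-table entry has been cleared, so all restart-style errors are collapsed to -EINTR.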
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index eb3af2739537..07532d83be0b 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf)
this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
+static bool cache_node_is_unified(struct cacheinfo *this_leaf)
+{
+ return of_property_read_bool(this_leaf->of_node, "cache-unified");
+}
+
static void cache_of_override_properties(unsigned int cpu)
{
int index;
@@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu)
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = this_cpu_ci->info_list + index;
+ /*
+ * init_cache_level must setup the cache level correctly
+ * overriding the architecturally specified levels, so
+ * if type is NONE at this stage, it should be unified
+ */
+ if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+ cache_node_is_unified(this_leaf))
+ this_leaf->type = CACHE_TYPE_UNIFIED;
cache_size(this_leaf);
cache_get_line_size(this_leaf);
cache_nr_sets(this_leaf);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 45a2f59cd935..07e55cd8f8c8 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -77,6 +77,7 @@ config BT_HCIBTSDIO
config BT_HCIUART
tristate "HCI UART driver"
depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
+ depends on NVMEM || !NVMEM
depends on TTY
help
Bluetooth HCI UART driver.
@@ -107,6 +108,7 @@ config BT_HCIUART_NOKIA
tristate "UART Nokia H4+ protocol support"
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
+ depends on GPIOLIB
depends on PM
select BT_HCIUART_H4
select BT_BCM
@@ -168,6 +170,7 @@ config BT_HCIUART_3WIRE
config BT_HCIUART_INTEL
bool "Intel protocol support"
depends on BT_HCIUART
+ depends on GPIOLIB
select BT_HCIUART_H4
select BT_INTEL
help
@@ -181,6 +184,7 @@ config BT_HCIUART_BCM
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on (!ACPI || SERIAL_DEV_CTRL_TTYPORT)
+ depends on GPIOLIB
select BT_HCIUART_H4
select BT_BCM
help
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 7971bfbd4321..801ea4ca65e4 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -259,7 +259,7 @@ static int bpa10x_flush(struct hci_dev *hdev)
static int bpa10x_setup(struct hci_dev *hdev)
{
- const u8 req[] = { 0x07 };
+ static const u8 req[] = { 0x07 };
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index d9e6b41658e5..cfe6ad4cc621 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -44,8 +44,8 @@ struct bcm_set_sleep_mode {
__u8 tristate_control;
__u8 usb_auto_sleep;
__u8 usb_resume_timeout;
- __u8 pulsed_host_wake;
__u8 break_to_host;
+ __u8 pulsed_host_wake;
} __packed;
struct bcm_set_pcm_int_params {
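The two swapped lines above are a wire-format fix, not a cleanup: struct bcm_set_sleep_mode is __packed and copied into a vendor HCI command verbatim, so member order is the byte order on the wire, and break_to_host and pulsed_host_wake had been transmitted in each other's positions. The general rule, shown with a hypothetical command layout:

        struct fw_cmd {
                __u8   opcode;          /* byte 0 on the wire */
                __u8   flags;           /* byte 1 */
                __le16 timeout;         /* bytes 2-3, little-endian */
        } __packed;                     /* no padding: layout == declaration order */

The matching reorder of default_sleep_params later in hci_bcm.c only keeps the initializer in declaration order; with designated initializers it is functionally equivalent either way.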
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 4459555c9d88..07f00e422e85 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
int btintel_enter_mfg(struct hci_dev *hdev)
{
- const u8 param[] = { 0x01, 0x00 };
+ static const u8 param[] = { 0x01, 0x00 };
struct sk_buff *skb;
skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
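The bpa10x and btintel one-liners are the same micro-fix: without static, a const array at function scope is rebuilt on the stack on every call; static const places the bytes in .rodata exactly once. A sketch of the resulting shape (send_enter_mfg is an illustrative wrapper; the opcode is the one from the hunk above):

        static int send_enter_mfg(struct hci_dev *hdev)
        {
                static const u8 param[] = { 0x01, 0x00 };       /* lives in .rodata */
                struct sk_buff *skb;

                skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(param), param,
                                     HCI_CMD_TIMEOUT);
                if (IS_ERR(skb))
                        return PTR_ERR(skb);

                kfree_skb(skb);
                return 0;
        }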
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 808c249845db..29977ebfd031 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/usb/quirks.h>
#include <linux/firmware.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -264,6 +265,7 @@ static const struct usb_device_id blacklist_table[] = {
/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
@@ -271,6 +273,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
@@ -388,9 +391,8 @@ static const struct usb_device_id blacklist_table[] = {
#define BTUSB_FIRMWARE_LOADED 7
#define BTUSB_FIRMWARE_FAILED 8
#define BTUSB_BOOTING 9
-#define BTUSB_RESET_RESUME 10
-#define BTUSB_DIAG_RUNNING 11
-#define BTUSB_OOB_WAKE_ENABLED 12
+#define BTUSB_DIAG_RUNNING 10
+#define BTUSB_OOB_WAKE_ENABLED 11
struct btusb_data {
struct hci_dev *hdev;
@@ -3121,9 +3123,9 @@ static int btusb_probe(struct usb_interface *intf,
/* QCA Rome devices lose their updated firmware over suspend,
* but the USB hub doesn't notice any status change.
- * Explicitly request a device reset on resume.
+ * explicitly request a device reset on resume.
*/
- set_bit(BTUSB_RESET_RESUME, &data->flags);
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -3134,7 +3136,7 @@ static int btusb_probe(struct usb_interface *intf,
* but the USB hub doesn't notice any status change.
* Explicitly request a device reset on resume.
*/
- set_bit(BTUSB_RESET_RESUME, &data->flags);
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
#endif
@@ -3303,14 +3305,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
enable_irq(data->oob_wake_irq);
}
- /* Optionally request a device reset on resume, but only when
- * wakeups are disabled. If wakeups are enabled we assume the
- * device will stay powered up throughout suspend.
- */
- if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
- !device_may_wakeup(&data->udev->dev))
- data->udev->reset_resume = 1;
-
return 0;
}
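Net effect of retiring BTUSB_RESET_RESUME: the driver no longer decides in its own suspend() hook whether to set udev->reset_resume; it tags the device once at probe time and usbcore applies its reset-on-resume handling uniformly. The whole mechanism now fits in the probe path, sketched here with a hypothetical driver:

        static int example_probe(struct usb_interface *intf,
                                 const struct usb_device_id *id)
        {
                struct usb_device *udev = interface_to_usbdev(intf);

                /* usbcore, not the driver, now owns the reset-on-resume policy */
                udev->quirks |= USB_QUIRK_RESET_RESUME;
                return 0;
        }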
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 47fc58c9eb49..64800cd2796c 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -29,6 +29,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
@@ -52,7 +53,37 @@
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
-/* device driver resources */
+/**
+ * struct bcm_device - device driver resources
+ * @serdev_hu: HCI UART controller struct
+ * @list: bcm_device_list node
+ * @dev: physical UART slave
+ * @name: device name logged by bt_dev_*() functions
+ * @device_wakeup: BT_WAKE pin,
+ * assert = Bluetooth device must wake up or remain awake,
+ * deassert = Bluetooth device may sleep when sleep criteria are met
+ * @shutdown: BT_REG_ON pin,
+ * power up or power down Bluetooth device internal regulators
+ * @set_device_wakeup: callback to toggle BT_WAKE pin
+ * either by accessing @device_wakeup or by calling @btlp
+ * @set_shutdown: callback to toggle BT_REG_ON pin
+ * either by accessing @shutdown or by calling @btpu/@btpd
+ * @btlp: Apple ACPI method to toggle BT_WAKE pin ("Bluetooth Low Power")
+ * @btpu: Apple ACPI method to drive BT_REG_ON pin high ("Bluetooth Power Up")
+ * @btpd: Apple ACPI method to drive BT_REG_ON pin low ("Bluetooth Power Down")
+ * @clk: clock used by Bluetooth device
+ * @clk_enabled: whether @clk is prepared and enabled
+ * @init_speed: default baudrate of Bluetooth device;
+ * the host UART is initially set to this baudrate so that
+ * it can configure the Bluetooth device for @oper_speed
+ * @oper_speed: preferred baudrate of Bluetooth device;
+ * set to 0 if @init_speed is already the preferred baudrate
+ * @irq: interrupt triggered by HOST_WAKE_BT pin
+ * @irq_active_low: whether @irq is active low
+ * @hu: pointer to HCI UART controller struct,
+ * used to disable flow control during runtime suspend and system sleep
+ * @is_suspended: whether flow control is currently disabled
+ */
struct bcm_device {
/* Must be the first member, hci_serdev.c expects this. */
struct hci_uart serdev_hu;
@@ -63,6 +94,11 @@ struct bcm_device {
const char *name;
struct gpio_desc *device_wakeup;
struct gpio_desc *shutdown;
+ int (*set_device_wakeup)(struct bcm_device *, bool);
+ int (*set_shutdown)(struct bcm_device *, bool);
+#ifdef CONFIG_ACPI
+ acpi_handle btlp, btpu, btpd;
+#endif
struct clk *clk;
bool clk_enabled;
@@ -74,7 +110,7 @@ struct bcm_device {
#ifdef CONFIG_PM
struct hci_uart *hu;
- bool is_suspended; /* suspend/resume flag */
+ bool is_suspended;
#endif
};
@@ -170,11 +206,21 @@ static bool bcm_device_exists(struct bcm_device *device)
static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
{
- if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
- clk_prepare_enable(dev->clk);
+ int err;
- gpiod_set_value(dev->shutdown, powered);
- gpiod_set_value(dev->device_wakeup, powered);
+ if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled) {
+ err = clk_prepare_enable(dev->clk);
+ if (err)
+ return err;
+ }
+
+ err = dev->set_shutdown(dev, powered);
+ if (err)
+ goto err_clk_disable;
+
+ err = dev->set_device_wakeup(dev, powered);
+ if (err)
+ goto err_revert_shutdown;
if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
clk_disable_unprepare(dev->clk);
@@ -182,6 +228,13 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
dev->clk_enabled = powered;
return 0;
+
+err_revert_shutdown:
+ dev->set_shutdown(dev, !powered);
+err_clk_disable:
+ if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+ clk_disable_unprepare(dev->clk);
+ return err;
}
#ifdef CONFIG_PM
@@ -191,9 +244,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
bt_dev_dbg(bdev, "Host wake IRQ");
- pm_runtime_get(bdev->dev);
- pm_runtime_mark_last_busy(bdev->dev);
- pm_runtime_put_autosuspend(bdev->dev);
+ pm_request_resume(bdev->dev);
return IRQ_HANDLED;
}
@@ -218,8 +269,10 @@ static int bcm_request_irq(struct bcm_data *bcm)
bdev->irq_active_low ? IRQF_TRIGGER_FALLING :
IRQF_TRIGGER_RISING,
"host_wake", bdev);
- if (err)
+ if (err) {
+ bdev->irq = err;
goto unlock;
+ }
device_init_wakeup(bdev->dev, true);
@@ -247,8 +300,8 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
/* Irrelevant USB flags */
.usb_auto_sleep = 0,
.usb_resume_timeout = 0,
+ .break_to_host = 0,
.pulsed_host_wake = 0,
- .break_to_host = 0
};
static int bcm_setup_sleep(struct hci_uart *hu)
@@ -304,6 +357,7 @@ static int bcm_open(struct hci_uart *hu)
{
struct bcm_data *bcm;
struct list_head *p;
+ int err;
bt_dev_dbg(hu->hdev, "hu %p", hu);
@@ -318,7 +372,10 @@ static int bcm_open(struct hci_uart *hu)
mutex_lock(&bcm_device_lock);
if (hu->serdev) {
- serdev_device_open(hu->serdev);
+ err = serdev_device_open(hu->serdev);
+ if (err)
+ goto err_free;
+
bcm->dev = serdev_device_get_drvdata(hu->serdev);
goto out;
}
@@ -346,17 +403,30 @@ out:
if (bcm->dev) {
hu->init_speed = bcm->dev->init_speed;
hu->oper_speed = bcm->dev->oper_speed;
- bcm_gpio_set_power(bcm->dev, true);
+ err = bcm_gpio_set_power(bcm->dev, true);
+ if (err)
+ goto err_unset_hu;
}
mutex_unlock(&bcm_device_lock);
return 0;
+
+err_unset_hu:
+#ifdef CONFIG_PM
+ bcm->dev->hu = NULL;
+#endif
+err_free:
+ mutex_unlock(&bcm_device_lock);
+ hu->priv = NULL;
+ kfree(bcm);
+ return err;
}
static int bcm_close(struct hci_uart *hu)
{
struct bcm_data *bcm = hu->priv;
struct bcm_device *bdev = NULL;
+ int err;
bt_dev_dbg(hu->hdev, "hu %p", hu);
@@ -374,16 +444,17 @@ static int bcm_close(struct hci_uart *hu)
}
if (bdev) {
- bcm_gpio_set_power(bdev, false);
-#ifdef CONFIG_PM
- pm_runtime_disable(bdev->dev);
- pm_runtime_set_suspended(bdev->dev);
-
- if (device_can_wakeup(bdev->dev)) {
+ if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
devm_free_irq(bdev->dev, bdev->irq, bdev);
device_init_wakeup(bdev->dev, false);
+ pm_runtime_disable(bdev->dev);
}
-#endif
+
+ err = bcm_gpio_set_power(bdev, false);
+ if (err)
+ bt_dev_err(hu->hdev, "Failed to power down");
+ else
+ pm_runtime_set_suspended(bdev->dev);
}
mutex_unlock(&bcm_device_lock);
@@ -512,11 +583,8 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
} else if (!bcm->rx_skb) {
/* Delay auto-suspend when receiving completed packet */
mutex_lock(&bcm_device_lock);
- if (bcm->dev && bcm_device_exists(bcm->dev)) {
- pm_runtime_get(bcm->dev->dev);
- pm_runtime_mark_last_busy(bcm->dev->dev);
- pm_runtime_put_autosuspend(bcm->dev->dev);
- }
+ if (bcm->dev && bcm_device_exists(bcm->dev))
+ pm_request_resume(bcm->dev->dev);
mutex_unlock(&bcm_device_lock);
}
@@ -566,6 +634,7 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
static int bcm_suspend_device(struct device *dev)
{
struct bcm_device *bdev = dev_get_drvdata(dev);
+ int err;
bt_dev_dbg(bdev, "");
@@ -577,27 +646,37 @@ static int bcm_suspend_device(struct device *dev)
}
/* Suspend the device */
- if (bdev->device_wakeup) {
- gpiod_set_value(bdev->device_wakeup, false);
- bt_dev_dbg(bdev, "suspend, delaying 15 ms");
- mdelay(15);
+ err = bdev->set_device_wakeup(bdev, false);
+ if (err) {
+ if (bdev->is_suspended && bdev->hu) {
+ bdev->is_suspended = false;
+ hci_uart_set_flow_control(bdev->hu, false);
+ }
+ return -EBUSY;
}
+ bt_dev_dbg(bdev, "suspend, delaying 15 ms");
+ msleep(15);
+
return 0;
}
static int bcm_resume_device(struct device *dev)
{
struct bcm_device *bdev = dev_get_drvdata(dev);
+ int err;
bt_dev_dbg(bdev, "");
- if (bdev->device_wakeup) {
- gpiod_set_value(bdev->device_wakeup, true);
- bt_dev_dbg(bdev, "resume, delaying 15 ms");
- mdelay(15);
+ err = bdev->set_device_wakeup(bdev, true);
+ if (err) {
+ dev_err(dev, "Failed to power up\n");
+ return err;
}
+ bt_dev_dbg(bdev, "resume, delaying 15 ms");
+ msleep(15);
+
/* When this executes, the device has woken up already */
if (bdev->is_suspended && bdev->hu) {
bdev->is_suspended = false;
@@ -632,7 +711,7 @@ static int bcm_suspend(struct device *dev)
if (pm_runtime_active(dev))
bcm_suspend_device(dev);
- if (device_may_wakeup(dev)) {
+ if (device_may_wakeup(dev) && bdev->irq > 0) {
error = enable_irq_wake(bdev->irq);
if (!error)
bt_dev_dbg(bdev, "BCM irq: enabled");
@@ -648,6 +727,7 @@ unlock:
static int bcm_resume(struct device *dev)
{
struct bcm_device *bdev = dev_get_drvdata(dev);
+ int err = 0;
bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
@@ -662,19 +742,21 @@ static int bcm_resume(struct device *dev)
if (!bdev->hu)
goto unlock;
- if (device_may_wakeup(dev)) {
+ if (device_may_wakeup(dev) && bdev->irq > 0) {
disable_irq_wake(bdev->irq);
bt_dev_dbg(bdev, "BCM irq: disabled");
}
- bcm_resume_device(dev);
+ err = bcm_resume_device(dev);
unlock:
mutex_unlock(&bcm_device_lock);
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ if (!err) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
return 0;
}
@@ -771,25 +853,84 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
return 0;
}
+
+static int bcm_apple_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+ if (ACPI_FAILURE(acpi_execute_simple_method(dev->btlp, NULL, !awake)))
+ return -EIO;
+
+ return 0;
+}
+
+static int bcm_apple_set_shutdown(struct bcm_device *dev, bool powered)
+{
+ if (ACPI_FAILURE(acpi_evaluate_object(powered ? dev->btpu : dev->btpd,
+ NULL, NULL, NULL)))
+ return -EIO;
+
+ return 0;
+}
+
+static int bcm_apple_get_resources(struct bcm_device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev->dev);
+ const union acpi_object *obj;
+
+ if (!adev ||
+ ACPI_FAILURE(acpi_get_handle(adev->handle, "BTLP", &dev->btlp)) ||
+ ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPU", &dev->btpu)) ||
+ ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPD", &dev->btpd)))
+ return -ENODEV;
+
+ if (!acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, &obj) &&
+ obj->buffer.length == 8)
+ dev->init_speed = *(u64 *)obj->buffer.pointer;
+
+ dev->set_device_wakeup = bcm_apple_set_device_wakeup;
+ dev->set_shutdown = bcm_apple_set_shutdown;
+
+ return 0;
+}
+#else
+static inline int bcm_apple_get_resources(struct bcm_device *dev)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_ACPI */
+static int bcm_gpio_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+ gpiod_set_value(dev->device_wakeup, awake);
+ return 0;
+}
+
+static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered)
+{
+ gpiod_set_value(dev->shutdown, powered);
+ return 0;
+}
+
static int bcm_get_resources(struct bcm_device *dev)
{
dev->name = dev_name(dev->dev);
+ if (x86_apple_machine && !bcm_apple_get_resources(dev))
+ return 0;
+
dev->clk = devm_clk_get(dev->dev, NULL);
- dev->device_wakeup = devm_gpiod_get_optional(dev->dev,
- "device-wakeup",
- GPIOD_OUT_LOW);
+ dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
+ GPIOD_OUT_LOW);
if (IS_ERR(dev->device_wakeup))
return PTR_ERR(dev->device_wakeup);
- dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
- GPIOD_OUT_LOW);
+ dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
if (IS_ERR(dev->shutdown))
return PTR_ERR(dev->shutdown);
+ dev->set_device_wakeup = bcm_gpio_set_device_wakeup;
+ dev->set_shutdown = bcm_gpio_set_shutdown;
+
/* IRQ can be declared in ACPI table as Interrupt or GpioInt */
if (dev->irq <= 0) {
struct gpio_desc *gpio;
@@ -802,7 +943,7 @@ static int bcm_get_resources(struct bcm_device *dev)
dev->irq = gpiod_to_irq(gpio);
}
- dev_info(dev->dev, "BCM irq: %d\n", dev->irq);
+ dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq);
return 0;
}
@@ -892,7 +1033,9 @@ static int bcm_probe(struct platform_device *pdev)
list_add_tail(&dev->list, &bcm_device_list);
mutex_unlock(&bcm_device_lock);
- bcm_gpio_set_power(dev, false);
+ ret = bcm_gpio_set_power(dev, false);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to power down\n");
return 0;
}
@@ -994,7 +1137,9 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
if (err)
return err;
- bcm_gpio_set_power(bcmdev, false);
+ err = bcm_gpio_set_power(bcmdev, false);
+ if (err)
+ dev_err(&serdev->dev, "Failed to power down\n");
return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
}
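The structural core of the hci_bcm rework is the pair of callbacks: the power and PM paths call dev->set_device_wakeup()/dev->set_shutdown() and stop caring whether BT_WAKE and BT_REG_ON are real GPIOs or, on Apple machines, the BTLP/BTPU/BTPD ACPI methods. A condensed sketch with illustrative names (the ACPI variant is a stand-in for the acpi_execute_simple_method() call in the patch):

        struct bt_power {
                struct gpio_desc *wakeup_gpio;
                int (*set_wakeup)(struct bt_power *p, bool awake);
        };

        static int gpio_set_wakeup(struct bt_power *p, bool awake)
        {
                gpiod_set_value(p->wakeup_gpio, awake);
                return 0;                       /* GPIO writes cannot fail here */
        }

        static int acpi_set_wakeup(struct bt_power *p, bool awake)
        {
                return -EIO;                    /* stand-in for the ACPI-method call */
        }

        static void pick_backend(struct bt_power *p, bool is_apple_machine)
        {
                p->set_wakeup = is_apple_machine ? acpi_set_wakeup : gpio_set_wakeup;
        }

Also worth noting in the same patch: bcm_gpio_set_power() now unwinds in reverse order on failure (revert the shutdown pin, then disable the clock), so the device is never left half-powered, and the gpiod_get_optional() calls became mandatory gpiod_get() because the callbacks assume the descriptors exist.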
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 328ca93781cf..1b76d9585902 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
.remove = sunxi_rsb_device_remove,
+ .uevent = of_device_uevent_modalias,
};
static void sunxi_rsb_dev_release(struct device *dev)
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 3e104f5aa0c2..b56b3f711d94 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_AUTHENC
+ select CRYPTO_GF128MUL
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..4bcef78a08aa 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
ndesc = ctx->handle_result(priv, ring, sreq->req,
&should_complete, &ret);
if (ndesc < 0) {
+ kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
return;
}
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..fcc0a606d748 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "safexcel.h"
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
unsigned int key_len;
};
+struct safexcel_cipher_req {
+ bool needs_inv;
+};
+
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
struct crypto_async_request *async,
struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
return 0;
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
spin_unlock_bh(&priv->ring[ring].egress_lock);
request->req = &req->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_aes_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
return ndesc;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ int err;
+
+ if (sreq->needs_inv) {
+ sreq->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
-
ret = safexcel_invalidate_cache(async, &ctx->base, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
@@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ int ret;
+
+ if (sreq->needs_inv)
+ ret = safexcel_cipher_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_aes_send(async, ring, request,
+ commands, results);
+ return ret;
+}
+
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct skcipher_request req;
+ SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct skcipher_request));
+ memset(req, 0, sizeof(struct skcipher_request));
/* create invalidation request */
init_completion(&result.completion);
- skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ safexcel_inv_complete, &result);
- skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_cipher_send_inv;
+ sreq->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
if (!priv->ring[ring].need_dequeue)
@@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req,
enum safexcel_cipher_direction dir, u32 mode)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
+ sreq->needs_inv = false;
ctx->direction = dir;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_cipher_send_inv;
+ if (ctx->base.needs_inv) {
+ sreq->needs_inv = true;
+ ctx->base.needs_inv = false;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
- ctx->base.send = safexcel_aes_send;
-
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
EIP197_GFP_FLAGS(req->base),
&ctx->base.ctxr_dma);
@@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
alg.skcipher.base);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_send;
+ ctx->base.handle_result = safexcel_handle_result;
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct safexcel_cipher_req));
return 0;
}
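The common thread of the safexcel changes: needs_inv describes a single request, not the transform, so it moves from the shared safexcel_cipher_ctx into a per-request context (room for which is reserved once in cra_init via crypto_skcipher_set_reqsize()), and one fixed send/handle_result pair dispatches on the flag instead of function pointers being swapped while requests are in flight. A stripped-down sketch; my_req_ctx, send_inv and send_data are placeholders, not driver API:

        struct my_req_ctx {
                bool needs_inv;                 /* valid for this request only */
        };

        static int send_inv(struct skcipher_request *req)  { return 0; }
        static int send_data(struct skcipher_request *req) { return 0; }

        static int my_send(struct crypto_async_request *async)
        {
                struct skcipher_request *req = skcipher_request_cast(async);
                struct my_req_ctx *rctx = skcipher_request_ctx(req);

                return rctx->needs_inv ? send_inv(req) : send_data(req);
        }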
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 74feb6227101..0c5a5820b06e 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -32,9 +32,10 @@ struct safexcel_ahash_req {
bool last_req;
bool finish;
bool hmac;
+ bool needs_inv;
u8 state_sz; /* expected sate size, only set once */
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
u64 len;
u64 processed;
@@ -119,15 +120,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
}
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct safexcel_result_desc *rdesc;
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
- int cache_len, result_sz = sreq->state_sz;
+ int cache_len;
*ret = 0;
@@ -148,8 +149,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->finish)
- result_sz = crypto_ahash_digestsize(ahash);
- memcpy(sreq->state, areq->result, result_sz);
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
dma_unmap_sg(priv->dev, areq->src,
sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
@@ -165,9 +166,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
return 1;
}
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ struct safexcel_request *request,
+ int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -273,7 +274,7 @@ send_command:
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
- ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+ ctx->base.result_dma = dma_map_single(priv->dev, req->state,
req->state_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
ret = -EINVAL;
@@ -292,7 +293,6 @@ send_command:
req->processed += len;
request->req = &areq->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = 1;
@@ -374,8 +374,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_ahash_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -392,6 +390,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
return 1;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int err;
+
+ if (req->needs_inv) {
+ req->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -400,7 +418,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
@@ -412,28 +429,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_ahash_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int ret;
+
+ if (req->needs_inv)
+ ret = safexcel_ahash_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_ahash_send_req(async, ring, request,
+ commands, results);
+ return ret;
+}
+
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct ahash_request req;
+ AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+ struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct ahash_request));
+ memset(req, 0, sizeof(struct ahash_request));
/* create invalidation request */
init_completion(&result.completion);
- ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
safexcel_inv_complete, &result);
- ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_ahash_send_inv;
+ rctx->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
if (!priv->ring[ring].need_dequeue)
@@ -481,14 +516,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
- ctx->base.send = safexcel_ahash_send;
+ req->needs_inv = false;
if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_ahash_send_inv;
+ if (ctx->base.needs_inv) {
+ ctx->base.needs_inv = false;
+ req->needs_inv = true;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -622,6 +659,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
struct safexcel_alg_template, alg.ahash);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_ahash_send;
+ ctx->base.handle_result = safexcel_handle_result;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct safexcel_ahash_req));
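One subtlety shared with the cipher patch: a bare struct ahash_request on the stack has no room for the driver's request context, whose size is only fixed at runtime by crypto_ahash_set_reqsize(). That is why the invalidation path switches to AHASH_REQUEST_ON_STACK (and SKCIPHER_REQUEST_ON_STACK above), which reserves the maximum context size alongside the request. Sketch, assuming an already-allocated tfm:

        static void queue_invalidation(struct crypto_ahash *tfm)
        {
                AHASH_REQUEST_ON_STACK(req, tfm);  /* request + reqsize bytes */

                ahash_request_set_tfm(req, tfm);
                /* set callback, mark needs_inv in the request ctx, enqueue */
        }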
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 48de52cf2ecc..662e709812cc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
CWQ_ENTRY_SIZE, 0, NULL);
if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
return -ENOMEM;
}
return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
{
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+ queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}
static long spu_queue_register_workfn(void *arg)
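The n2_core fix is the generic free-and-forget hazard: kmem_cache_destroy() accepts NULL, so clearing the global pointer immediately after destroying the cache turns any duplicate or later teardown into a no-op rather than a double destroy. In general form:

        static struct kmem_cache *example_cache;

        static void example_cache_teardown(void)
        {
                kmem_cache_destroy(example_cache);      /* NULL-safe */
                example_cache = NULL;                   /* repeat calls are no-ops */
        }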
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index ec8ac5c4dd84..055e2e8f985a 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -20,10 +20,6 @@
#define NO_FURTHER_WRITE_ACTION -1
-#ifndef phys_to_page
-#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT)
-#endif
-
/**
* efi_free_all_buff_pages - free all previous allocated buffer pages
* @cap_info: pointer to current instance of capsule_info structure
@@ -35,7 +31,7 @@
static void efi_free_all_buff_pages(struct capsule_info *cap_info)
{
while (cap_info->index > 0)
- __free_page(phys_to_page(cap_info->pages[--cap_info->index]));
+ __free_page(cap_info->pages[--cap_info->index]);
cap_info->index = NO_FURTHER_WRITE_ACTION;
}
@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
cap_info->pages = temp_page;
+ temp_page = krealloc(cap_info->phys,
+ pages_needed * sizeof(phys_addr_t *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page)
+ return -ENOMEM;
+
+ cap_info->phys = temp_page;
+
return 0;
}
@@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
**/
static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
{
+ bool do_vunmap = false;
int ret;
- ret = efi_capsule_update(&cap_info->header, cap_info->pages);
+ /*
+ * cap_info->capsule may have been assigned already by a quirk
+ * handler, so only overwrite it if it is NULL
+ */
+ if (!cap_info->capsule) {
+ cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+ VM_MAP, PAGE_KERNEL);
+ if (!cap_info->capsule)
+ return -ENOMEM;
+ do_vunmap = true;
+ }
+
+ ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+ if (do_vunmap)
+ vunmap(cap_info->capsule);
if (ret) {
pr_err("capsule update failed\n");
return ret;
@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
goto failed;
}
- cap_info->pages[cap_info->index++] = page_to_phys(page);
+ cap_info->pages[cap_info->index] = page;
+ cap_info->phys[cap_info->index] = page_to_phys(page);
cap_info->page_bytes_remain = PAGE_SIZE;
+ cap_info->index++;
} else {
- page = phys_to_page(cap_info->pages[cap_info->index - 1]);
+ page = cap_info->pages[cap_info->index - 1];
}
kbuff = kmap(page);
@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
struct capsule_info *cap_info = file->private_data;
kfree(cap_info->pages);
+ kfree(cap_info->phys);
kfree(file->private_data);
file->private_data = NULL;
return 0;
@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
+ cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+ if (!cap_info->phys) {
+ kfree(cap_info->pages);
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
file->private_data = cap_info;
return 0;
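The capsule loader now keeps two parallel views of the staging buffer: struct page pointers, so the scattered pages can be stitched into one virtually contiguous mapping when the firmware capsule must be read linearly, and phys_addr_t values for efi_capsule_update(). The vmap step in isolation (map_capsule_buffer is a hypothetical helper, not part of the patch):

        /* Map n scattered pages as one contiguous virtual buffer; the caller
         * must vunmap() the result when finished with it. */
        static void *map_capsule_buffer(struct page **pages, unsigned int n)
        {
                return vmap(pages, n, VM_MAP, PAGE_KERNEL);
        }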
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index dfcf56ee3c61..76861a00bb92 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -522,6 +522,7 @@ static struct of_device_id const bcm_kona_gpio_of_match[] = {
* category than their parents, so it won't report false recursion.
*/
static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
@@ -531,7 +532,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
ret = irq_set_chip_data(irq, d->host_data);
if (ret < 0)
return ret;
- irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
irq_set_noprobe(irq);
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 545d43a587b7..bb4f8cf18bd9 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -327,6 +327,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
* category than their parents, so it won't report false recursion.
*/
static struct lock_class_key brcmstb_gpio_irq_lock_class;
+static struct lock_class_key brcmstb_gpio_irq_request_class;
static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
@@ -346,7 +347,8 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
ret = irq_set_chip_data(irq, &bank->gc);
if (ret < 0)
return ret;
- irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+ irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
+ &brcmstb_gpio_irq_request_class);
irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
irq_set_noprobe(irq);
return 0;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 8db47f671708..02fa8fe2292a 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -565,6 +565,7 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
* than their parents, so it won't report false recursion.
*/
static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
static int tegra_gpio_probe(struct platform_device *pdev)
{
@@ -670,7 +671,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[GPIO_BANK(gpio)];
- irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_lockdep_class(irq, &gpio_lock_class,
+ &gpio_request_class);
irq_set_chip_data(irq, bank);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 2313af82fad3..acd59113e08b 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -139,7 +139,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
struct irq_data *irq_data,
- bool early)
+ bool reserve)
{
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index aad84a6306c4..44332b793718 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -73,7 +73,8 @@ LIST_HEAD(gpio_devices);
static void gpiochip_free_hogs(struct gpio_chip *chip);
static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
- struct lock_class_key *key);
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -1100,7 +1101,8 @@ static void gpiochip_setup_devs(void)
}
int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
- struct lock_class_key *key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
unsigned long flags;
int status = 0;
@@ -1246,7 +1248,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (status)
goto err_remove_from_list;
- status = gpiochip_add_irqchip(chip, key);
+ status = gpiochip_add_irqchip(chip, lock_key, request_key);
if (status)
goto err_remove_chip;
@@ -1632,7 +1634,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
* This lock class tells lockdep that GPIO irqs are in a different
* category than their parents, so it won't report false recursion.
*/
- irq_set_lockdep_class(irq, chip->irq.lock_key);
+ irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
/* Chips that use nested thread handlers have them marked */
if (chip->irq.threaded)
@@ -1712,10 +1714,12 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
/**
* gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
* @gpiochip: the GPIO chip to add the IRQ chip to
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
*/
static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
- struct lock_class_key *lock_key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
struct irq_chip *irqchip = gpiochip->irq.chip;
const struct irq_domain_ops *ops;
@@ -1753,6 +1757,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
gpiochip->to_irq = gpiochip_to_irq;
gpiochip->irq.default_type = type;
gpiochip->irq.lock_key = lock_key;
+ gpiochip->irq.request_key = request_key;
if (gpiochip->irq.domain_ops)
ops = gpiochip->irq.domain_ops;
@@ -1850,7 +1855,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
* @threaded: whether this irqchip uses a nested thread handler
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
*
* This function closely associates a certain irqchip with a certain
* gpiochip, providing an irq domain to translate the local IRQs to
@@ -1872,7 +1878,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
irq_flow_handler_t handler,
unsigned int type,
bool threaded,
- struct lock_class_key *lock_key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
struct device_node *of_node;
@@ -1913,6 +1920,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
gpiochip->irq.default_type = type;
gpiochip->to_irq = gpiochip_to_irq;
gpiochip->irq.lock_key = lock_key;
+ gpiochip->irq.request_key = request_key;
gpiochip->irq.domain = irq_domain_add_simple(of_node,
gpiochip->ngpio, first_irq,
&gpiochip_domain_ops, gpiochip);
@@ -1940,7 +1948,8 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
- struct lock_class_key *key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
return 0;
}
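
The two-key split above separates the irq_desc lock class from the new sleeping request-mutex class. For a driver wiring up its own irqdomain map hook, the resulting pattern looks like this minimal sketch (the foo_* names are illustrative; irq_set_lockdep_class() with two keys is the API this series introduces):

/* One lockdep class for the irq_desc lock, one for the request mutex. */
static struct lock_class_key foo_irq_lock_class;
static struct lock_class_key foo_irq_request_class;

static struct irq_chip foo_irq_chip = {
	.name = "foo",
};

static int foo_irq_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	/* Both keys are passed now that the request mutex has its own class. */
	irq_set_lockdep_class(virq, &foo_irq_lock_class,
			      &foo_irq_request_class);
	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_simple_irq);
	return 0;
}

The intc-irqpin and arizona hunks further down follow exactly this shape.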
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index a9782b1aba47..34daf895f848 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment(
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
- const struct default_adjustment *default_adjust);
+ enum dc_color_space colorspace);
void dpp1_cm_set_gamut_remap(
struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 40627c244bf5..ed1216b53465 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap(
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
- const struct default_adjustment *default_adjust)
+ enum dc_color_space colorspace)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
uint32_t ocsc_mode = 0;
- if (default_adjust != NULL) {
- switch (default_adjust->out_color_space) {
+ switch (colorspace) {
case COLOR_SPACE_SRGB:
case COLOR_SPACE_2020_RGB_FULLRANGE:
ocsc_mode = 0;
@@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default(
case COLOR_SPACE_UNKNOWN:
default:
break;
- }
}
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 961ad5c3b454..05dc01e54531 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
tbl_entry.color_space = color_space;
//tbl_entry.regval = matrix;
pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ } else {
+ pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 83a68460edcd..9420dfb94d39 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -64,7 +64,7 @@ struct dpp_funcs {
void (*opp_set_csc_default)(
struct dpp *dpp,
- const struct default_adjustment *default_adjust);
+ enum dc_color_space colorspace);
void (*opp_set_csc_adjustment)(
struct dpp *dpp,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2e065facdce7..a0f4d2a2a481 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -168,16 +168,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
int x, int y)
{
+ const struct drm_format_info *format = fb->format;
+ unsigned int num_planes = format->num_planes;
u32 addr = drm_fb_obj(fb)->dev_addr;
- int num_planes = fb->format->num_planes;
int i;
if (num_planes > 3)
num_planes = 3;
- for (i = 0; i < num_planes; i++)
+ addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+ x * format->cpp[0];
+
+ y /= format->vsub;
+ x /= format->hsub;
+
+ for (i = 1; i < num_planes; i++)
addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * fb->format->cpp[i];
+ x * format->cpp[i];
for (; i < 3; i++)
addrs[i] = 0;
}
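
The reworked helper computes the luma address at full resolution first and only then scales x/y down by the format's subsampling factors, so the chroma planes of e.g. a 4:2:0 layout land on the right bytes. The same arithmetic as a standalone, compilable sketch (all layout values below are made up for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative 4:2:0 planar layout: full-res Y, half-res U and V. */
	uint32_t base = 0x10000000, offsets[3] = { 0, 0x20000, 0x28000 };
	uint32_t pitches[3] = { 1024, 512, 512 }, cpp[3] = { 1, 1, 1 };
	unsigned int hsub = 2, vsub = 2;
	int x = 33, y = 17, i;
	uint32_t addrs[3];

	/* Plane 0 (luma) is addressed at full resolution... */
	addrs[0] = base + offsets[0] + y * pitches[0] + x * cpp[0];

	/* ...then x/y are scaled down once for the subsampled chroma planes. */
	y /= vsub;
	x /= hsub;
	for (i = 1; i < 3; i++)
		addrs[i] = base + offsets[i] + y * pitches[i] + x * cpp[i];

	for (i = 0; i < 3; i++)
		printf("plane %d -> 0x%08x\n", i, (unsigned int)addrs[i]);
	return 0;
}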
@@ -744,15 +751,14 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
if (plane->fb)
drm_framebuffer_put(plane->fb);
- /* Power down the Y/U/V FIFOs */
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
-
/* Power down most RAMs and FIFOs if this is the primary plane */
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
+ sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN64x66;
dma_ctrl0_mask = CFG_GRA_ENA;
} else {
+ /* Power down the Y/U/V FIFOs */
+ sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
dma_ctrl0_mask = CFG_DMA_ENA;
}
@@ -1225,17 +1231,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
dcrtc);
- if (ret < 0) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret < 0)
+ goto err_crtc;
if (dcrtc->variant->init) {
ret = dcrtc->variant->init(dcrtc, dev);
- if (ret) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret)
+ goto err_crtc;
}
/* Ensure AXI pipeline is enabled */
@@ -1246,13 +1248,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->crtc.port = port;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
- return -ENOMEM;
+ if (!primary) {
+ ret = -ENOMEM;
+ goto err_crtc;
+ }
ret = armada_drm_plane_init(primary);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1263,7 +1267,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1282,6 +1286,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
err_crtc_init:
primary->base.funcs->destroy(&primary->base);
+err_crtc:
+ kfree(dcrtc);
+
return ret;
}
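
All of the early-return sites above now funnel into a single labelled exit that frees dcrtc, the standard kernel unwind idiom. Reduced to its skeleton in plain, compilable C (the names and failure points are illustrative):

#include <stdlib.h>

struct ctx { int dummy; };

static int step_a(struct ctx *c) { (void)c; return 0; }
static int step_b(struct ctx *c) { (void)c; return -1; /* pretend failure */ }

static int create(void)
{
	struct ctx *c = malloc(sizeof(*c));
	int ret;

	if (!c)
		return -1;

	ret = step_a(c);
	if (ret)
		goto err_free;	/* every failure funnels to one exit... */

	ret = step_b(c);
	if (ret)
		goto err_free;	/* ...so the free appears exactly once */

	return 0;

err_free:
	free(c);
	return ret;
}

int main(void)
{
	return create() ? 1 : 0;
}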
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index bab11f483575..bfd3514fbe9b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -42,6 +42,8 @@ struct armada_plane_work {
};
struct armada_plane_state {
+ u16 src_x;
+ u16 src_y;
u32 src_hw;
u32 dst_hw;
u32 dst_yx;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index b411b608821a..aba947696178 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -99,6 +99,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
{
struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ const struct drm_format_info *format;
struct drm_rect src = {
.x1 = src_x,
.y1 = src_y,
@@ -117,7 +118,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
};
uint32_t val, ctrl0;
unsigned idx = 0;
- bool visible;
+ bool visible, fb_changed;
int ret;
trace_armada_ovl_plane_update(plane, crtc, fb,
@@ -138,6 +139,18 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (!visible)
ctrl0 &= ~CFG_DMA_ENA;
+ /*
+ * Shifting a YUV packed format image by one pixel causes the U/V
+ * planes to swap. Compensate for it by also toggling the UV swap.
+ */
+ format = fb->format;
+ if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1))
+ ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+ fb_changed = plane->fb != fb ||
+ dplane->base.state.src_x != src.x1 >> 16 ||
+ dplane->base.state.src_y != src.y1 >> 16;
+
if (!dcrtc->plane) {
dcrtc->plane = plane;
armada_ovl_update_attr(&dplane->prop, dcrtc);
@@ -145,7 +158,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
- if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
+ if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) {
val = (drm_rect_height(&src) & 0xffff0000) |
drm_rect_width(&src) >> 16;
dplane->base.state.src_hw = val;
@@ -169,9 +182,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
armada_drm_plane_work_cancel(dcrtc, &dplane->base);
- if (plane->fb != fb) {
- u32 addrs[3], pixel_format;
- int num_planes, hsub;
+ if (fb_changed) {
+ u32 addrs[3];
/*
* Take a reference on the new framebuffer - we want to
@@ -182,23 +194,11 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (plane->fb)
armada_ovl_retire_fb(dplane, plane->fb);
- src_y = src.y1 >> 16;
- src_x = src.x1 >> 16;
+ dplane->base.state.src_y = src_y = src.y1 >> 16;
+ dplane->base.state.src_x = src_x = src.x1 >> 16;
armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
- pixel_format = fb->format->format;
- hsub = drm_format_horz_chroma_subsampling(pixel_format);
- num_planes = fb->format->num_planes;
-
- /*
- * Annoyingly, shifting a YUYV-format image by one pixel
- * causes the U/V planes to toggle. Toggle the UV swap.
- * (Unfortunately, this causes momentary colour flickering.)
- */
- if (src_x & (hsub - 1) && num_planes == 1)
- ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
-
armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54b5d4c582b6..e143004e66d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2368,6 +2368,9 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset_wq;
+
/* Display functions */
struct drm_i915_display_funcs display;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3866c49bc390..333f40bc03bb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6977,6 +6977,7 @@ enum {
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
#define MASK_WAKEMEM (1<<13)
#define SKL_DFSM _MMIO(0x51000)
@@ -8522,6 +8523,7 @@ enum skl_power_gate {
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index b2a6d62b71c0..60cf4e58389a 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -860,16 +860,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
- int min_cdclk = skl_calc_cdclk(0, vco);
u32 val;
WARN_ON(vco != 8100000 && vco != 8640000);
- /* select the minimum CDCLK before enabling DPLL 0 */
- val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
- I915_WRITE(CDCLK_CTL, val);
- POSTING_READ(CDCLK_CTL);
-
/*
* We always enable DPLL0 with the lowest link rate possible, but still
* taking into account the VCO required to operate the eDP panel at the
@@ -923,7 +917,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
- u32 freq_select, pcu_ack;
+ u32 freq_select, pcu_ack, cdclk_ctl;
int ret;
WARN_ON((cdclk == 24000) != (vco == 0));
@@ -940,7 +934,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- /* set CDCLK_CTL */
+ /* Choose frequency for this cdclk */
switch (cdclk) {
case 450000:
case 432000:
@@ -968,10 +962,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
+ cdclk_ctl = I915_READ(CDCLK_CTL);
+
+ if (dev_priv->cdclk.hw.vco != vco) {
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ }
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ POSTING_READ(CDCLK_CTL);
+
if (dev_priv->cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
- I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 30cf273d57aa..123585eeb87d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12544,11 +12544,15 @@ static int intel_atomic_commit(struct drm_device *dev,
INIT_WORK(&state->commit_work, intel_atomic_commit_work);
i915_sw_fence_commit(&intel_state->commit_ready);
- if (nonblock)
+ if (nonblock && intel_state->modeset) {
+ queue_work(dev_priv->modeset_wq, &state->commit_work);
+ } else if (nonblock) {
queue_work(system_unbound_wq, &state->commit_work);
- else
+ } else {
+ if (intel_state->modeset)
+ flush_workqueue(dev_priv->modeset_wq);
intel_atomic_commit_tail(state);
-
+ }
return 0;
}
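
An ordered workqueue executes at most one item at a time, in queueing order, so nonblocking modesets serialize against each other while plane-only commits keep using system_unbound_wq, and the blocking path drains any pending modesets before committing inline. A condensed sketch of that dispatch (kernel-style; queue_commit() and the workqueue name are illustrative):

static struct workqueue_struct *example_modeset_wq;

static int example_init(void)
{
	/* Ordered: at most one queued commit runs at a time. */
	example_modeset_wq = alloc_ordered_workqueue("example_modeset", 0);
	return example_modeset_wq ? 0 : -ENOMEM;
}

static void queue_commit(struct work_struct *commit_work,
			 bool nonblock, bool is_modeset)
{
	if (nonblock && is_modeset)
		queue_work(example_modeset_wq, commit_work);
	else if (nonblock)
		queue_work(system_unbound_wq, commit_work);
	else if (is_modeset)
		flush_workqueue(example_modeset_wq);	/* then commit inline */
}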
@@ -14462,6 +14466,8 @@ int intel_modeset_init(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
+ dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
@@ -15270,6 +15276,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_gt_powersave(dev_priv);
intel_teardown_gmbus(dev_priv);
+
+ destroy_workqueue(dev_priv->modeset_wq);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 6e3b430fccdc..55ea5eb3b7df 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
- i915_reg_t psr_ctl;
+ i915_reg_t psr_status;
u32 psr_status_mask;
if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
0);
if (dev_priv->psr.psr2_support) {
- psr_ctl = EDP_PSR2_CTL;
+ psr_status = EDP_PSR2_STATUS_CTL;
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) &
+ I915_WRITE(EDP_PSR2_CTL,
+ I915_READ(EDP_PSR2_CTL) &
~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
} else {
- psr_ctl = EDP_PSR_STATUS_CTL;
+ psr_status = EDP_PSR_STATUS_CTL;
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+ I915_WRITE(EDP_PSR_CTL,
+ I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
}
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv,
- psr_ctl, psr_status_mask, 0,
+ psr_status, psr_status_mask, 0,
2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8af286c63d3b..7e115f3927f6 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling DC5\n");
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("Disabling DC6\n");
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index e626eddf24d5..23db74ae1826 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -78,6 +78,8 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
/* then read the message */
msg.len = cnt & 0xf;
+ if (msg.len > CEC_MAX_MSG_SIZE - 2)
+ msg.len = CEC_MAX_MSG_SIZE - 2;
msg.msg[0] = hdmi_read_reg(core->base,
HDMI_CEC_RX_CMD_HEADER);
msg.msg[1] = hdmi_read_reg(core->base,
@@ -104,26 +106,6 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
}
}
-static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
-{
- if (stat1 & 2) {
- u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
-
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_NACK |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, (dbg3 >> 4) & 7, 0, 0);
- } else if (stat1 & 1) {
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_ARB_LOST |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, 0, 0, 0);
- } else if (stat1 == 0) {
- cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
- 0, 0, 0, 0);
- }
-}
-
void hdmi4_cec_irq(struct hdmi_core_data *core)
{
u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
@@ -132,27 +114,21 @@ void hdmi4_cec_irq(struct hdmi_core_data *core)
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
- if (stat0 & 0x40)
+ if (stat0 & 0x20) {
+ cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
- else if (stat0 & 0x24)
- hdmi_cec_transmit_fifo_empty(core, stat1);
- if (stat1 & 2) {
+ } else if (stat1 & 0x02) {
u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
cec_transmit_done(core->adap,
CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES,
0, (dbg3 >> 4) & 7, 0, 0);
- } else if (stat1 & 1) {
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_ARB_LOST |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, 0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
}
if (stat0 & 0x02)
hdmi_cec_received_msg(core);
- if (stat1 & 0x3)
- REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
}
static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
@@ -231,18 +207,14 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
/*
* Enable CEC interrupts:
* Transmit Buffer Full/Empty Change event
- * Transmitter FIFO Empty event
* Receiver FIFO Not Empty event
*/
- hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22);
/*
* Enable CEC interrupts:
- * RX FIFO Overrun Error event
- * Short Pulse Detected event
* Frame Retransmit Count Exceeded event
- * Start Bit Irregularity event
*/
- hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02);
/* cec calibration enable (self clearing) */
hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b5ba6441489f..5d252fb27a82 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1007,6 +1007,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ return -ENOMEM;
ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f3fcb836a1f9..0c3f608131cf 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -551,7 +551,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
break;
default:
- hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
+ hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 68cdc962265b..271f31461da4 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -696,8 +696,16 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
(u8 *)&word, 2);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- size = I2C_SMBUS_BLOCK_DATA;
- /* fallthrough */
+ if (read_write == I2C_SMBUS_READ) {
+ read_length = data->block[0];
+ count = cp2112_write_read_req(buf, addr, read_length,
+ command, NULL, 0);
+ } else {
+ count = cp2112_write_req(buf, addr, command,
+ data->block + 1,
+ data->block[0]);
+ }
+ break;
case I2C_SMBUS_BLOCK_DATA:
if (I2C_SMBUS_READ == read_write) {
count = cp2112_write_read_req(buf, addr,
@@ -785,6 +793,9 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
case I2C_SMBUS_WORD_DATA:
data->word = le16_to_cpup((__le16 *)buf);
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ memcpy(data->block + 1, buf, read_length);
+ break;
case I2C_SMBUS_BLOCK_DATA:
if (read_length > I2C_SMBUS_BLOCK_MAX) {
ret = -EPROTO;
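
The fix works because I2C block transfers cannot be folded into the SMBus block path: the union i2c_smbus_data convention keeps the byte count in block[0] with the payload starting at block[1], and the two protocols frame that payload differently on the wire. A sketch of the buffer handling only (an illustrative helper, not the cp2112 code; the i2c symbols are the real kernel ones):

static int example_i2c_block(union i2c_smbus_data *data, char read_write,
			     u8 *bounce)
{
	u8 len = data->block[0];	/* byte count lives in block[0] */

	if (len > I2C_SMBUS_BLOCK_MAX)
		return -EINVAL;

	if (read_write == I2C_SMBUS_READ)
		memcpy(data->block + 1, bounce, len);	/* payload after count */
	else
		memcpy(bounce, data->block + 1, len);
	return 0;
}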
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index 9325545fc3ae..edc0f64bb584 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -32,10 +32,6 @@
#ifdef CONFIG_HOLTEK_FF
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
-MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
-
/*
* These commands and parameters are currently known:
*
@@ -223,3 +219,7 @@ static struct hid_driver holtek_driver = {
.probe = holtek_probe,
};
module_hid_driver(holtek_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 76ed9a216f10..610223f0e945 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1378,6 +1378,8 @@ void vmbus_device_unregister(struct hv_device *device_obj)
pr_debug("child device %s unregistered\n",
dev_name(&device_obj->device));
+ kset_unregister(device_obj->channels_kset);
+
/*
* Kick off the process of unregistering the device.
* This will call vmbus_remove() and eventually vmbus_device_release()
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a1d687a664f8..66f0268f37a6 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
}
#endif
-struct ib_device *__ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(u32 ifindex);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 30914f3baa5f..465520627e4b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
return 0;
}
-struct ib_device *__ib_device_get_by_index(u32 index)
+static struct ib_device *__ib_device_get_by_index(u32 index)
{
struct ib_device *device;
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
return NULL;
}
+/*
+ * Caller is responsible for dropping the reference count by calling put_device()
+ */
+struct ib_device *ib_device_get_by_index(u32 index)
+{
+ struct ib_device *device;
+
+ down_read(&lists_rwsem);
+ device = __ib_device_get_by_index(index);
+ if (device)
+ get_device(&device->dev);
+
+ up_read(&lists_rwsem);
+ return device;
+}
+
static struct ib_device *__ib_device_get_by_name(const char *name)
{
struct ib_device *device;
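
With the lookup now taking a reference under lists_rwsem, every caller inherits a put_device() obligation on all exit paths, which is what the nldev rework below enforces. The contract in miniature (do_something() is a hypothetical stand-in for the caller's work):

static int do_something(struct ib_device *device);	/* hypothetical */

static int example_query(u32 index)
{
	struct ib_device *device = ib_device_get_by_index(index);
	int err;

	if (!device)
		return -EINVAL;

	err = do_something(device);

	put_device(&device->dev);	/* drop the ref on every path */
	return err;
}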
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 9a05245a1acf..0dcd1aa6f683 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(index);
+ device = ib_device_get_by_index(index);
if (!device)
return -EINVAL;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
0, 0);
err = fill_dev_info(msg, device);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
+ if (err)
+ goto err_free;
nlmsg_end(msg, nlh);
+ put_device(&device->dev);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return err;
}
static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(index);
+ device = ib_device_get_by_index(index);
if (!device)
return -EINVAL;
port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
- if (!rdma_is_port_valid(device, port))
- return -EINVAL;
+ if (!rdma_is_port_valid(device, port)) {
+ err = -EINVAL;
+ goto err;
+ }
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
0, 0);
err = fill_port_info(msg, device, port);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
+ if (err)
+ goto err_free;
nlmsg_end(msg, nlh);
+ put_device(&device->dev);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return err;
}
static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
return -EINVAL;
ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(ifindex);
+ device = ib_device_get_by_index(ifindex);
if (!device)
return -EINVAL;
@@ -299,7 +315,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
nlmsg_end(skb, nlh);
}
-out: cb->args[0] = idx;
+out:
+ put_device(&device->dev);
+ cb->args[0] = idx;
return skb->len;
}
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 313bfb9ccb71..4975f3e6596e 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
goto err_free_mr;
mr->max_pages = max_num_sg;
-
err = mlx4_mr_enable(dev->dev, &mr->mmr);
if (err)
goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
return &mr->ibmr;
err_free_pl:
+ mr->ibmr.device = pd->device;
mlx4_free_priv_pages(mr);
err_free_mr:
(void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 12b7f911f0e5..8880351df179 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -902,8 +902,8 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
- struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +917,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
- return;
+ return NULL;
+ }
+
+ /* To avoid a race condition, make sure that the
+ * neigh is added only once.
+ */
+ if (unlikely(!list_empty(&neigh->list))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return neigh;
}
path = __path_find(dev, daddr + 4);
@@ -956,7 +964,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
path->ah->last_send = rn->send(dev, skb, path->ah->ah,
IPOIB_QPN(daddr));
ipoib_neigh_put(neigh);
- return;
+ return NULL;
}
} else {
neigh->ah = NULL;
@@ -973,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
- return;
+ return NULL;
err_path:
ipoib_neigh_free(neigh);
@@ -983,6 +991,8 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
+
+ return NULL;
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1091,8 +1101,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_TIPC):
neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, phdr->hwaddr, dev);
- return NETDEV_TX_OK;
+ neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+ if (likely(!neigh))
+ return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
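
This hunk and the ipoib_multicast one below close the same race with one trick: a freshly initialized neigh has an empty list_head, so finding it non-empty under priv->lock proves another context already linked it. In isolation the insert-once pattern looks like this (kernel list API; the struct is illustrative):

struct example_neigh {
	struct list_head list;
	/* ... */
};

static void add_once(struct example_neigh *n, struct list_head *head,
		     spinlock_t *lock)
{
	spin_lock(lock);
	/* Non-empty means some other path already added this entry. */
	if (list_empty(&n->list))
		list_add_tail(&n->list, head);
	spin_unlock(lock);
}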
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 93e149efc1f5..9b3f47ae2016 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
neigh = ipoib_neigh_alloc(daddr, dev);
- if (neigh) {
+ /* Make sure that the neigh is added to the
+ * mcast list only once.
+ */
+ if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8a1bd354b1cc..bfa576aa9f03 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1013,8 +1013,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
return -ENOMEM;
attr->qp_state = IB_QPS_INIT;
- attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE;
+ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
attr->port_num = ch->sport->port;
attr->pkey_index = 0;
@@ -2078,7 +2077,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
goto destroy_ib;
}
- guid = (__be16 *)&param->primary_path->sgid.global.interface_id;
+ guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3d8ff09eba57..c868a878c84f 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -163,7 +163,7 @@ static unsigned int get_time_pit(void)
#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
-#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
+#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "get_cycles"
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ae473123583b..3d51175c4d72 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
return union_desc;
dev_err(&intf->dev,
- "Union descriptor to short (%d vs %zd\n)",
+ "Union descriptor too short (%d vs %zd)\n",
union_desc->bLength, sizeof(*union_desc));
return NULL;
}
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6bf56bb5f8d9..d91f3b1c5375 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev,
0, width, 0, 0);
input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
0, height, 0, 0);
- input_set_abs_params(mtouch, ABS_MT_PRESSURE,
- 0, 255, 0, 0);
ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
if (ret) {
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b84cd978fce2..a4aaa748e987 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 5:
etd->hw_version = 3;
break;
- case 6 ... 14:
+ case 6 ... 15:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index e102d7764bc2..a458e5ec9e41 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/async.h>
#include <linux/i2c.h>
@@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client,
}
/*
- * Systems using device tree should set up interrupt via DTS,
- * the rest will use the default falling edge interrupts.
+ * Platform code (ACPI, DTS) should normally set up the interrupt
+ * for us, but in case it did not, let's fall back to using a falling
+ * edge trigger to be compatible with older Chromebooks.
*/
- irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+ irqflags = irq_get_trigger_type(client->irq);
+ if (!irqflags)
+ irqflags = IRQF_TRIGGER_FALLING;
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, elants_i2c_irq,
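
irq_get_trigger_type() returns the trigger that firmware (ACPI or DT) already attached to the interrupt, or zero if none was described, so the driver only forces falling edge as a last resort. The fallback by itself (a minimal sketch; pick_irq_flags() is illustrative):

static unsigned long pick_irq_flags(unsigned int irq)
{
	/* Nonzero when ACPI/DT already configured a trigger type. */
	unsigned long irqflags = irq_get_trigger_type(irq);

	if (!irqflags)
		irqflags = IRQF_TRIGGER_FALLING;	/* legacy default */

	return irqflags;
}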
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index fc080a7c2e1f..f1cd4dd9a4a3 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -10,8 +10,7 @@
#include <linux/of.h>
#include <linux/firmware.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 7d5eb004091d..97baf88d9505 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4184,7 +4184,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
struct irq_cfg *cfg);
static int irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data, bool early)
+ struct irq_data *irq_data, bool reserve)
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index f122071688fd..744592d330ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
domain->geometry.aperture_end = (1UL << ias) - 1;
domain->geometry.force_aperture = true;
- smmu_domain->pgtbl_ops = pgtbl_ops;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
- if (ret < 0)
+ if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
+ return ret;
+ }
- return ret;
+ smmu_domain->pgtbl_ops = pgtbl_ops;
+ return 0;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
- int i;
+ int i, j;
struct arm_smmu_master_data *master = fwspec->iommu_priv;
struct arm_smmu_device *smmu = master->smmu;
@@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
u32 sid = fwspec->ids[i];
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+ /* Bridged PCI devices may end up with duplicated IDs */
+ for (j = 0; j < i; j++)
+ if (fwspec->ids[j] == sid)
+ break;
+ if (j < i)
+ continue;
+
arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
}
}
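
The inner scan is a deliberately simple O(n^2) duplicate check, which is fine because fwspec ID lists are small. The same skip logic in standalone, runnable C:

#include <stdio.h>

int main(void)
{
	unsigned int ids[] = { 7, 3, 7, 9, 3 };	/* duplicates on purpose */
	int n = sizeof(ids) / sizeof(ids[0]), i, j;

	for (i = 0; i < n; i++) {
		/* Skip this ID if an earlier slot already carried it. */
		for (j = 0; j < i; j++)
			if (ids[j] == ids[i])
				break;
		if (j < i)
			continue;

		printf("programming entry for id %u\n", ids[i]);
	}
	return 0;
}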
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 76a193c7fcfc..66f69af2c219 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1397,7 +1397,7 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
}
static int intel_irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data, bool early)
+ struct irq_data *irq_data, bool reserve)
{
intel_ir_reconfigure_irte(irq_data, true);
return 0;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4039e64cd342..06f025fd5726 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2303,7 +2303,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static int its_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d, bool early)
+ struct irq_data *d, bool reserve)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
@@ -2818,7 +2818,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
}
static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d, bool early)
+ struct irq_data *d, bool reserve)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 06f29cf5018a..cee59fe1321c 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -342,6 +342,9 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
*/
static struct lock_class_key intc_irqpin_irq_lock_class;
+/* And this is for the request mutex */
+static struct lock_class_key intc_irqpin_irq_request_class;
+
static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
@@ -352,7 +355,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
intc_irqpin_dbg(&p->irq[hw], "map");
irq_set_chip_data(virq, h->host_data);
- irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
+ irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
+ &intc_irqpin_irq_request_class);
irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
return 0;
}
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index f3654fd2eaf3..ede4fa0ac2cc 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -186,8 +186,9 @@ void led_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- led_stop_software_blink(led_cdev);
+ del_timer_sync(&led_cdev->blink_timer);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 09cf3699e354..a307832d7e45 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -184,6 +184,7 @@ static struct irq_chip arizona_irq_chip = {
};
static struct lock_class_key arizona_irq_lock_class;
+static struct lock_class_key arizona_irq_request_class;
static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
@@ -191,7 +192,8 @@ static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
struct arizona *data = h->host_data;
irq_set_chip_data(virq, data);
- irq_set_lockdep_class(virq, &arizona_irq_lock_class);
+ irq_set_lockdep_class(virq, &arizona_irq_lock_class,
+ &arizona_irq_request_class);
irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
irq_set_nested_thread(virq, 1);
irq_set_noprobe(virq);
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 590fb9aad77d..c3ed885c155c 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1543,6 +1543,9 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
rtsx_pci_power_off(pcr, HOST_ENTER_S1);
pci_disable_device(pcidev);
+ free_irq(pcr->irq, (void *)pcr);
+ if (pcr->msi_en)
+ pci_disable_msi(pcr->pci);
}
#else /* CONFIG_PM */
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 90b9a9ccbe60..9285f60e5783 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -963,6 +963,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
switch (command) {
case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
break;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 3cd371c94e83..634c51e6b8ae 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -543,7 +543,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
data = be32_to_cpup((__be32 *)&cf->data[0]);
priv->write(data, &priv->tx_mb->data[0]);
}
- if (cf->can_dlc > 3) {
+ if (cf->can_dlc > 4) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
priv->write(data, &priv->tx_mb->data[1]);
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b00358297424..12ff0020ecd6 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
if (dev->can.state == CAN_STATE_ERROR_WARNING ||
dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 68ac3e88a8ce..8bf80ad9dc44 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
rc);
- return rc;
+ return (rc > 0) ? 0 : rc;
}
static void gs_usb_xmit_callback(struct urb *urb)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 5d1753cfacea..ed6828821fbd 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
tbp = peer_tb;
}
- if (tbp[IFLA_IFNAME]) {
+ if (ifmp && tbp[IFLA_IFNAME]) {
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 561b05089cb6..db830a1141d9 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1497,10 +1497,13 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- /* Older models support a different tag format that we do not
- * support in net/dsa/tag_brcm.c yet.
+ /* Older models (5325, 5365) support a different tag format that we do
+ * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
+ * mode to be turned on, which means we need to specifically manage ARL
+ * misses on multicast addresses (TBD).
*/
- if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
+ if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
+ !b53_can_enable_brcm_tags(ds, port))
return DSA_TAG_PROTO_NONE;
/* Broadcom BCM58xx chips have a flow accelerator on Port 8
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f4e13a7014bd..36c8950dbd2d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -602,7 +602,7 @@ struct vortex_private {
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
unsigned int cur_rx, cur_tx; /* The next free ring entry */
- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ unsigned int dirty_tx; /* The ring entries to be free()ed. */
struct vortex_extra_stats xstats; /* NIC-specific extra stats */
struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -618,7 +618,6 @@ struct vortex_private {
/* The remainder are related to chip state, mostly media selection. */
struct timer_list timer; /* Media selection timer. */
- struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
int options; /* User-settable misc. driver options. */
unsigned int media_override:4, /* Passed-in media type. */
default_media:4, /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(struct timer_list *t);
-static void rx_oom_timer(struct timer_list *t);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
timer_setup(&vp->timer, vortex_timer, 0);
mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
- timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
if (vortex_debug > 1)
pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
window_write16(vp, 0x0040, 4, Wn4_NetDiag);
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
- vp->cur_rx = vp->dirty_rx = 0;
+ vp->cur_rx = 0;
/* Initialize the RxEarly register as recommended. */
iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
int i;
int retval;
+ dma_addr_t dma;
/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
break; /* Bad news! */
skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
- vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+ dma = pci_map_single(VORTEX_PCI(vp), skb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+ break;
+ vp->rx_ring[i].addr = cpu_to_le32(dma);
}
if (i != RX_RING_SIZE) {
pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
int len = (skb->len + 3) & ~3;
vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
spin_lock_irq(&vp->window_lock);
window_set(vp, 7);
iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
int entry = vp->cur_rx % RX_RING_SIZE;
void __iomem *ioaddr = vp->ioaddr;
int rx_status;
- int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+ int rx_work_limit = RX_RING_SIZE;
if (vortex_debug > 5)
pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
- struct sk_buff *skb;
+ struct sk_buff *skb, *newskb;
+ dma_addr_t newdma;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
+ /* Pre-allocate the replacement skb. If it or its
+ * mapping fails, then recycle the buffer that's already
+ * in place.
+ */
+ newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+ if (!newskb) {
+ dev->stats.rx_dropped++;
+ goto clear_complete;
+ }
+ newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+ dev->stats.rx_dropped++;
+ consume_skb(newskb);
+ goto clear_complete;
+ }
+
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
- vp->rx_skbuff[entry] = NULL;
+ vp->rx_skbuff[entry] = newskb;
+ vp->rx_ring[entry].addr = cpu_to_le32(newdma);
skb_put(skb, pkt_len);
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
netif_rx(skb);
dev->stats.rx_packets++;
}
- entry = (++vp->cur_rx) % RX_RING_SIZE;
- }
- /* Refill the Rx ring buffers. */
- for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
- struct sk_buff *skb;
- entry = vp->dirty_rx % RX_RING_SIZE;
- if (vp->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
- if (skb == NULL) {
- static unsigned long last_jif;
- if (time_after(jiffies, last_jif + 10 * HZ)) {
- pr_warn("%s: memory shortage\n",
- dev->name);
- last_jif = jiffies;
- }
- if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
- mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
- break; /* Bad news! */
- }
- vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
- vp->rx_skbuff[entry] = skb;
- }
+clear_complete:
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
iowrite16(UpUnstall, ioaddr + EL3_CMD);
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
}
return 0;
}
-/*
- * If we've hit a total OOM refilling the Rx ring we poll once a second
- * for some memory. Otherwise there is no way to restart the rx process.
- */
-static void
-rx_oom_timer(struct timer_list *t)
-{
- struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
- struct net_device *dev = vp->mii.dev;
-
- spin_lock_irq(&vp->lock);
- if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
- boomerang_rx(dev);
- if (vortex_debug > 1) {
- pr_debug("%s: rx_oom_timer %s\n", dev->name,
- ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
- }
- spin_unlock_irq(&vp->lock);
-}
-
static void
vortex_down(struct net_device *dev, int final_down)
{
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);
- del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */
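
The receive path now swaps buffers instead of refilling lazily, which is what makes dirty_rx and the OOM timer removable: the replacement skb is allocated and DMA-mapped before the received one is handed up, and any failure simply recycles the descriptor, so the ring can never drain. The shape of that exchange (a kernel-style sketch; struct ring_slot and rx_swap() are illustrative):

/* Returns the skb to pass up the stack, or NULL to recycle the slot. */
static struct sk_buff *rx_swap(struct ring_slot *slot, struct net_device *dev,
			       struct pci_dev *pdev, int buf_sz)
{
	struct sk_buff *old = slot->skb;
	struct sk_buff *new = netdev_alloc_skb_ip_align(dev, buf_sz);
	dma_addr_t map;

	if (!new)
		return NULL;			/* keep the old buffer mapped */

	map = pci_map_single(pdev, new->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, map)) {
		consume_skb(new);
		return NULL;			/* recycle instead of losing a slot */
	}

	pci_unmap_single(pdev, slot->dma, buf_sz, PCI_DMA_FROMDEVICE);
	slot->skb = new;
	slot->dma = map;
	return old;				/* now safe to hand to netif_rx() */
}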
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a6f283232cb7..6975150d144e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
+static void check_for_admin_com_state(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter);
+static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
@@ -1570,7 +1573,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
static int ena_up_complete(struct ena_adapter *adapter)
{
- int rc, i;
+ int rc;
rc = ena_rss_configure(adapter);
if (rc)
@@ -1589,17 +1592,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
ena_napi_enable_all(adapter);
- /* Enable completion queues interrupt */
- for (i = 0; i < adapter->num_queues; i++)
- ena_unmask_interrupt(&adapter->tx_ring[i],
- &adapter->rx_ring[i]);
-
- /* schedule napi in case we had pending packets
- * from the last time we disable napi
- */
- for (i = 0; i < adapter->num_queues; i++)
- napi_schedule(&adapter->ena_napi[i].napi);
-
return 0;
}
@@ -1736,7 +1728,7 @@ create_err:
static int ena_up(struct ena_adapter *adapter)
{
- int rc;
+ int rc, i;
netdev_dbg(adapter->netdev, "%s\n", __func__);
@@ -1779,6 +1771,17 @@ static int ena_up(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ /* Enable completion queues interrupt */
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_unmask_interrupt(&adapter->tx_ring[i],
+ &adapter->rx_ring[i]);
+
+ /* schedule napi in case we had pending packets
+ * from the last time we disabled napi
+ */
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_schedule(&adapter->ena_napi[i].napi);
+
return rc;
err_up:
@@ -1889,6 +1892,17 @@ static int ena_close(struct net_device *netdev)
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
+ /* Check the device status and issue a reset if needed */
+ check_for_admin_com_state(adapter);
+ if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ netif_err(adapter, ifdown, adapter->netdev,
+ "Destroy failure, restarting device\n");
+ ena_dump_stats_to_dmesg(adapter);
+ /* rtnl lock already obtained in dev_ioctl() layer */
+ ena_destroy_device(adapter);
+ ena_restore_device(adapter);
+ }
+
return 0;
}
@@ -2549,11 +2563,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
ena_com_set_admin_running_state(ena_dev, false);
- ena_close(netdev);
+ if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ ena_down(adapter);
/* Before releasing the ENA resources, a device reset is required.
* (to prevent the device from accessing them).
- * In case the reset flag is set and the device is up, ena_close
+ * In case the reset flag is set and the device is up, ena_down()
* already perform the reset, so it can be skipped.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 57eb26dfc059..d7c98e807ca8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -738,8 +738,9 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
break;
default:
- WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
- be16_to_cpu(skb->protocol));
+ netdev_WARN_ONCE(bp->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ be16_to_cpu(skb->protocol));
}
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 59c8ec9c1cad..7c560d545c03 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 89c3c8760a78..cf6ebf1e324b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1645,6 +1645,8 @@ next_rx:
rxr->rx_next_cons = NEXT_RX(cons);
next_rx_no_prod:
+ cpr->rx_packets += 1;
+ cpr->rx_bytes += len;
*raw_cons = tmp_raw_cons;
return rc;
@@ -1802,6 +1804,7 @@ static irqreturn_t bnxt_msix(int irq, void *dev_instance)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 cons = RING_CMP(cpr->cp_raw_cons);
+ cpr->event_ctr++;
prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
@@ -2025,6 +2028,15 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
+ if (bp->flags & BNXT_FLAG_DIM) {
+ struct net_dim_sample dim_sample;
+
+ net_dim_sample(cpr->event_ctr,
+ cpr->rx_packets,
+ cpr->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, dim_sample);
+ }
mmiowb();
return work_done;
}
@@ -2617,6 +2629,8 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+ cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+ cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
}
}
@@ -4593,6 +4607,36 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
req->flags = cpu_to_le16(flags);
}
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_coal coal;
+ unsigned int grp_idx;
+
+ /* Tick values in microseconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+ memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
+
+ coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
+ coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
+
+ if (!bnapi->rx_ring)
+ return -ENODEV;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+
+ bnxt_hwrm_set_coal_params(&coal, &req_rx);
+
+ grp_idx = bnapi->index;
+ req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+
+ return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
+ HWRM_CMD_TIMEOUT);
+}
+
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
int i, rc = 0;
@@ -5715,7 +5759,13 @@ static void bnxt_enable_napi(struct bnxt *bp)
int i;
for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
bp->bnapi[i]->in_reset = false;
+
+ if (bp->bnapi[i]->rx_ring) {
+ INIT_WORK(&cpr->dim.work, bnxt_dim_work);
+ cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
napi_enable(&bp->bnapi[i]->napi);
}
}
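
The bnxt_poll() hunk above accumulates per-ring interrupt, packet, and byte counters and hands a snapshot of them to net_dim() at the end of each poll; net_dim() then compares consecutive snapshots to decide whether to change the moderation profile. A minimal userspace sketch of that sampling step follows; the struct and function names are hypothetical simplifications, not the include/linux/net_dim.h API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified counterpart of struct net_dim_sample. */
struct dim_sample {
	uint64_t event_ctr;	/* interrupts seen so far */
	uint64_t pkt_ctr;	/* packets completed so far */
	uint64_t byte_ctr;	/* bytes completed so far */
};

/* Snapshot the ring counters, as bnxt_poll() does via net_dim_sample(). */
static void dim_take_sample(struct dim_sample *s,
			    uint64_t events, uint64_t pkts, uint64_t bytes)
{
	s->event_ctr = events;
	s->pkt_ctr = pkts;
	s->byte_ctr = bytes;
}

/* Crude stand-in for the comparison net_dim() performs between the
 * current sample and the one from the previous measurement window.
 */
static void dim_compare(const struct dim_sample *prev,
			const struct dim_sample *cur)
{
	uint64_t dpkts = cur->pkt_ctr - prev->pkt_ctr;
	uint64_t devents = cur->event_ctr - prev->event_ctr;

	printf("window: %llu packets over %llu interrupts\n",
	       (unsigned long long)dpkts, (unsigned long long)devents);
}

int main(void)
{
	struct dim_sample prev = {0}, cur;

	dim_take_sample(&cur, 100, 40000, 60000000);
	dim_compare(&prev, &cur);
	return 0;
}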
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2d268fc26f5e..89887a88b1bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -24,6 +24,7 @@
#include <net/dst_metadata.h>
#include <net/switchdev.h>
#include <net/xdp.h>
+#include <linux/net_dim.h>
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -608,6 +609,17 @@ struct bnxt_tx_ring_info {
struct bnxt_ring_struct tx_ring_struct;
};
+struct bnxt_coal {
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+ /* RING_IDLE enabled when coal ticks < idle_thresh */
+ u16 idle_thresh;
+ u8 bufs_per_record;
+ u8 budget;
+};
+
struct bnxt_tpa_info {
void *data;
u8 *data_ptr;
@@ -672,6 +684,13 @@ struct bnxt_cp_ring_info {
u32 cp_raw_cons;
void __iomem *cp_doorbell;
+ struct bnxt_coal rx_ring_coal;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 event_ctr;
+
+ struct net_dim dim;
+
struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
@@ -946,17 +965,6 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
-struct bnxt_coal {
- u16 coal_ticks;
- u16 coal_ticks_irq;
- u16 coal_bufs;
- u16 coal_bufs_irq;
- /* RING_IDLE enabled when coal ticks < idle_thresh */
- u16 idle_thresh;
- u8 bufs_per_record;
- u8 budget;
-};
-
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
@@ -1128,6 +1136,7 @@ struct bnxt {
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
+ #define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1425,4 +1434,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+void bnxt_dim_work(struct work_struct *work);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
new file mode 100644
index 000000000000..408dd190331e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -0,0 +1,32 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/net_dim.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+void bnxt_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct bnxt_cp_ring_info *cpr = container_of(dim,
+ struct bnxt_cp_ring_info,
+ dim);
+ struct bnxt_napi *bnapi = container_of(cpr,
+ struct bnxt_napi,
+ cp_ring);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ cpr->rx_ring_coal.coal_ticks = cur_profile.usec;
+ cpr->rx_ring_coal.coal_bufs = cur_profile.pkts;
+
+ bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
+ dim->state = NET_DIM_START_MEASURE;
+}
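
bnxt_dim_work() never stores back-pointers: it recovers the enclosing ring and NAPI context from the embedded struct net_dim purely with container_of(). A self-contained sketch of the same pattern, with container_of() redefined locally for illustration:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dim { int profile_ix; };
struct cp_ring { int id; struct dim dim; };
struct napi_ctx { struct cp_ring cp_ring; };

int main(void)
{
	struct napi_ctx ctx = { .cp_ring = { .id = 3 } };
	struct dim *d = &ctx.cp_ring.dim;

	/* Walk back out, exactly as bnxt_dim_work() does. */
	struct cp_ring *cpr = container_of(d, struct cp_ring, dim);
	struct napi_ctx *n = container_of(cpr, struct napi_ctx, cp_ring);

	printf("ring %d, ctx %p\n", cpr->id, (void *)n);
	return 0;
}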
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index fe7599f404bf..1801582076be 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -49,6 +49,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
memset(coal, 0, sizeof(*coal));
+ coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
coal->rx_coalesce_usecs = hw_coal->coal_ticks;
@@ -77,6 +79,15 @@ static int bnxt_set_coalesce(struct net_device *dev,
int rc = 0;
u16 mult;
+ if (coal->use_adaptive_rx_coalesce) {
+ bp->flags |= BNXT_FLAG_DIM;
+ } else {
+ if (bp->flags & BNXT_FLAG_DIM) {
+ bp->flags &= ~(BNXT_FLAG_DIM);
+ goto reset_coalesce;
+ }
+ }
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
@@ -104,6 +115,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
update_stats = true;
}
+reset_coalesce:
if (netif_running(dev)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ee18660bc33..c9617675f934 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
return -EINVAL;
}
- if (vf_id >= bp->pf.max_vfs) {
+ if (vf_id >= bp->pf.active_vfs) {
netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 2ae5ed151369..1d9b08c20f95 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -416,7 +416,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}
/* If all IP and L4 fields are wildcarded then this is an L2 flow */
- if (is_wildcard(&l3_mask, sizeof(l3_mask)) &&
+ if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
} else {
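
The one-line bnxt_tc.c fix above matters because l3_mask is a pointer: the old call passed &l3_mask with sizeof(l3_mask), so is_wildcard() scanned the bytes of the pointer variable itself instead of the mask it points to. A standalone demonstration, assuming is_wildcard() simply tests for an all-zero buffer:

#include <stdbool.h>
#include <stdio.h>

struct l3_key { unsigned char src[4], dst[4]; };

static bool is_wildcard(const void *mask, int len)
{
	const char *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0)
			return false;
	return true;
}

int main(void)
{
	static struct l3_key zero_mask;	/* all-zero: a wildcard */
	struct l3_key *l3_mask = &zero_mask;

	/* Buggy: inspects the pointer's own bytes (almost never zero). */
	printf("buggy: %d\n", is_wildcard(&l3_mask, sizeof(l3_mask)));
	/* Fixed: inspects the mask itself. */
	printf("fixed: %d\n", is_wildcard(l3_mask, sizeof(*l3_mask)));
	return 0;
}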
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 69d0b64e6986..baa67d362051 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -345,7 +345,6 @@ struct adapter_params {
unsigned int sf_size; /* serial flash size in bytes */
unsigned int sf_nsec; /* # of flash sectors */
- unsigned int sf_fw_start; /* start of FW image in flash */
unsigned int fw_vers; /* firmware version */
unsigned int bs_vers; /* bootstrap version */
@@ -826,6 +825,10 @@ struct mbox_list {
struct list_head list;
};
+struct mps_encap_entry {
+ atomic_t refcnt;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -840,6 +843,8 @@ struct adapter {
enum chip_type chip;
int msg_enable;
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
struct adapter_params params;
struct cxgb4_virt_res vres;
@@ -869,7 +874,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
+ unsigned int rawf_start;
+ unsigned int rawf_cnt;
struct smt_data *smt;
+ struct mps_encap_entry *mps_encap;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -1306,6 +1314,7 @@ void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
extern int dbfifo_int_thresh;
#define for_each_port(adapter, iter) \
@@ -1638,6 +1647,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 87ac1e4dafc1..3293980ce6e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
#include <net/addrconf.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
+#include <net/udp_tunnel.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -2987,6 +2988,151 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static void cxgb_del_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int ret = 0, i;
+
+ if (chip_ver < CHELSIO_T6)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!adapter->vxlan_port_cnt ||
+ adapter->vxlan_port != ti->port)
+ return; /* Invalid VxLAN destination port */
+
+ adapter->vxlan_port_cnt--;
+ if (adapter->vxlan_port_cnt)
+ return;
+
+ adapter->vxlan_port = 0;
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
+ break;
+ default:
+ return;
+ }
+
+ /* Matchall mac entries can be deleted only after all tunnel ports
+ * are brought down or removed.
+ */
+ if (!adapter->rawf_cnt)
+ return;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ ret = t4_free_raw_mac_filt(adapter, pi->viid,
+ match_all_mac, match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
+ i);
+ return;
+ }
+ atomic_dec(&adapter->mps_encap[adapter->rawf_start +
+ pi->port_id].refcnt);
+ }
+}
+
+static void cxgb_add_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int i, ret;
+
+ if (chip_ver < CHELSIO_T6)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ /* On T6, the firmware reserves the last 2 entries for
+ * storing the match-all mac filter (config file entry).
+ */
+ if (!adapter->rawf_cnt)
+ return;
+
+ /* The callback for adding a vxlan port can be invoked with the
+ * same port for both IPv4 and IPv6. We should not disable
+ * offloading when the same port is added for both protocols
+ * and later one of them is removed.
+ */
+ if (adapter->vxlan_port_cnt &&
+ adapter->vxlan_port == ti->port) {
+ adapter->vxlan_port_cnt++;
+ return;
+ }
+
+ /* We will support only one VxLAN port */
+ if (adapter->vxlan_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->vxlan_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->vxlan_port = ti->port;
+ adapter->vxlan_port_cnt = 1;
+
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
+ VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
+ break;
+ default:
+ return;
+ }
+
+ /* Create a 'match all' mac filter entry for inner mac,
+ * if the raw mac interface is supported. Once the Linux kernel
+ * provides driver entry points for adding/deleting inner mac
+ * addresses, we will remove this 'match all' entry and fall back
+ * to adding exact-match filters.
+ */
+ if (adapter->rawf_cnt) {
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+
+ ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
+ be16_to_cpu(ti->port));
+ cxgb_del_udp_tunnel(netdev, ti);
+ return;
+ }
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ }
+ }
+}
+
+static netdev_features_t cxgb_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+ return features;
+
+ /* Check if hw supports offload for this packet */
+ if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
+ return features;
+
+ /* Offload is not supported for this encapsulated packet */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
static netdev_features_t cxgb_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -3018,6 +3164,9 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
+ .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
+ .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
+ .ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
@@ -5080,6 +5229,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
+
+ if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
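
The add/del callbacks above track a single offloaded VXLAN port with a counter because the stack may announce the same port once for IPv4 and once for IPv6; the hardware register is only cleared when the count drops to zero. A minimal userspace model of that reference counting (names hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint16_t vxlan_port;	/* currently offloaded port, 0 = none */
static uint8_t vxlan_port_cnt;	/* how many times it was announced */

static void add_port(uint16_t port)
{
	if (vxlan_port_cnt && vxlan_port == port) {
		vxlan_port_cnt++;		/* same port, e.g. v4 + v6 */
		return;
	}
	if (vxlan_port_cnt)
		return;				/* only one port supported */
	vxlan_port = port;
	vxlan_port_cnt = 1;			/* program hardware here */
}

static void del_port(uint16_t port)
{
	if (!vxlan_port_cnt || vxlan_port != port)
		return;
	if (--vxlan_port_cnt)
		return;				/* other family still uses it */
	vxlan_port = 0;				/* clear hardware register */
}

int main(void)
{
	add_port(4789);	/* IPv4 */
	add_port(4789);	/* IPv6, same port: just bump the count */
	del_port(4789);	/* still offloaded for the other family */
	printf("port=%u cnt=%u\n", vxlan_port, vxlan_port_cnt);
	return 0;
}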
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 922f2f937789..eab781fab2a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -770,12 +770,19 @@ static inline unsigned int flits_to_desc(unsigned int n)
* Returns whether an Ethernet packet is small enough to fit as
* immediate data. Return value corresponds to headroom required.
*/
-static inline int is_eth_imm(const struct sk_buff *skb)
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
- int hdrlen = skb_shinfo(skb)->gso_size ?
- sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ int hdrlen = 0;
- hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
+ chip_ver > CHELSIO_T5) {
+ hdrlen = sizeof(struct cpl_tx_tnl_lso);
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ } else {
+ hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ }
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
@@ -788,10 +795,11 @@ static inline int is_eth_imm(const struct sk_buff *skb)
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
unsigned int flits;
- int hdrlen = is_eth_imm(skb);
+ int hdrlen = is_eth_imm(skb, chip_ver);
/* If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the
@@ -810,13 +818,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
- if (skb_shinfo(skb)->gso_size)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
- else
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_tnl_lso);
+ else
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core);
+
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ flits += (hdrlen / sizeof(__be64));
+ } else {
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ }
return flits;
}
@@ -827,9 +842,10 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* Returns the number of Tx descriptors needed for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
- return flits_to_desc(calc_tx_flits(skb));
+ return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
/**
@@ -1154,6 +1170,102 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
+/* Returns the tunnel type if the hardware supports offloading it.
+ * Called only on T6 and later chips (all callers check for
+ * chip_ver > CHELSIO_T5).
+ */
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
+{
+ u8 l4_hdr = 0;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ struct port_info *pi = netdev_priv(skb->dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return tnl_type;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_UDP:
+ if (adapter->vxlan_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_VXLAN;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ return tnl_type;
+}
+
+static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+ struct cpl_tx_tnl_lso *tnl_lso,
+ enum cpl_tx_tnl_lso_type tnl_type)
+{
+ u32 val;
+ int in_eth_xtra_len;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ bool v6 = (ip_hdr(skb)->version == 6);
+
+ val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
+ CPL_TX_TNL_LSO_FIRST_F |
+ CPL_TX_TNL_LSO_LAST_F |
+ (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
+ CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
+ CPL_TX_TNL_LSO_IPLENSETOUT_F |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
+ tnl_lso->op_to_IpIdSplitOut = htonl(val);
+
+ tnl_lso->IpIdOffsetOut = 0;
+
+ /* Get the tunnel header length */
+ val = skb_inner_mac_header(skb) - skb_mac_header(skb);
+ in_eth_xtra_len = skb_inner_network_header(skb) -
+ skb_inner_mac_header(skb) - ETH_HLEN;
+
+ switch (tnl_type) {
+ case TX_TNL_TYPE_VXLAN:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen =
+ htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
+ CPL_TX_TNL_LSO_UDPLENSETOUT_F);
+ break;
+ default:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
+ break;
+ }
+
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
+ htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
+ CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
+
+ tnl_lso->r1 = 0;
+
+ val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
+ CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
+ CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
+ tnl_lso->Flow_to_TcpHdrLen = htonl(val);
+
+ tnl_lso->IpIdOffset = htons(0);
+
+ tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
+ tnl_lso->TCPSeqOffset = htonl(0);
+ tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
+}
+
/**
* t4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1177,6 +1289,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
bool immediate = false;
int len, max_pkt_len;
bool ptp_enabled = is_ptp_enabled(skb, dev);
+ unsigned int chip_ver;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1227,7 +1342,8 @@ out_free: dev_kfree_skb_any(skb);
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
- flits = calc_tx_flits(skb);
+ chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+ flits = calc_tx_flits(skb, chip_ver);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -1241,9 +1357,12 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb))
+ if (is_eth_imm(skb, chip_ver))
immediate = true;
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ tnl_type = cxgb_encap_offload_supported(skb);
+
if (!immediate &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
@@ -1270,33 +1389,58 @@ out_free: dev_kfree_skb_any(skb);
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
+
+ if (tnl_type)
+ len += sizeof(*tnl_lso);
+ else
+ len += sizeof(*lso);
- len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->c.ipid_ofst = htons(0);
- lso->c.mss = htons(ssi->gso_size);
- lso->c.seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->c.len = htonl(skb->len);
- else
- lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
+ if (tnl_type) {
+ struct iphdr *iph = ip_hdr(skb);
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
+ cpl = (void *)(tnl_lso + 1);
+ /* The driver is expected to compute a partial checksum that
+ * does not include the IP Total Length.
+ */
+ if (iph->version == 4) {
+ iph->check = 0;
+ iph->tot_len = 0;
+ iph->check = (u16)(~ip_fast_csum((u8 *)iph,
+ iph->ihl));
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = hwcsum(adap->params.chip, skb);
+ } else {
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->c.ipid_ofst = htons(0);
+ lso->c.mss = htons(ssi->gso_size);
+ lso->c.seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->c.len = htonl(skb->len);
+ else
+ lso->c.len =
+ htonl(LSO_T5_XFER_SIZE_V(skb->len));
+ cpl = (void *)(lso + 1);
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip)
+ <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ }
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
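
The calc_tx_flits() changes above only swap which CPL header sizes contribute to hdrlen; the arithmetic is unchanged: header bytes are divided by sizeof(__be64) to get flits, and flits are rounded up into descriptors. A worked sketch with hypothetical header sizes, assuming 8 flits per Tx descriptor as in flits_to_desc():

#include <stdio.h>

#define FLIT_BYTES 8	/* sizeof(__be64) */

/* Round flits up to whole descriptors; a hypothetical stand-in for
 * flits_to_desc(), assuming 8 flits (64 bytes) per descriptor.
 */
static unsigned int flits_to_desc(unsigned int flits)
{
	return (flits + 7) / 8;
}

int main(void)
{
	/* Hypothetical sizes: WR header + tunnel LSO CPL + packet CPL. */
	unsigned int hdrlen = 16 + 48 + 16;
	unsigned int sgl_flits = 5;	/* from the gather list */
	unsigned int flits = sgl_flits + hdrlen / FLIT_BYTES;

	printf("%u flits -> %u descriptors\n", flits, flits_to_desc(flits));
	return 0;
}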
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f2a60e01d563..6d76851a4da9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2847,8 +2847,6 @@ enum {
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
-
- FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
/**
@@ -3561,8 +3559,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
const __be32 *p = (const __be32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- unsigned int fw_img_start = adap->params.sf_fw_start;
- unsigned int fw_start_sec = fw_img_start / sf_sec_size;
+ unsigned int fw_start_sec = FLASH_FW_START_SEC;
+ unsigned int fw_size = FLASH_FW_MAX_SIZE;
+ unsigned int fw_start = FLASH_FW_START;
if (!size) {
dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3578,9 +3577,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
"FW image size differs from size in FW header\n");
return -EINVAL;
}
- if (size > FW_MAX_SIZE) {
+ if (size > fw_size) {
dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
- FW_MAX_SIZE);
+ fw_size);
return -EFBIG;
}
if (!t4_fw_matches_chip(adap, hdr))
@@ -3607,11 +3606,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
- ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
- addr = fw_img_start;
+ addr = fw_start;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
@@ -3621,7 +3620,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
}
ret = t4_write_flash(adap,
- fw_img_start + offsetof(struct fw_hdr, fw_ver),
+ fw_start + offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
if (ret)
@@ -7468,6 +7467,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index of the entry in mps tcam
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Removes the mac entry at the specified index using the raw mac interface.
+ *
+ * Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ FW_CMD_LEN16_V(val));
+
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
+ FW_VI_MAC_ID_BASED_FREE);
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
+ * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index at which to add this entry
+ * @port_id: the port index
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Adds the mac entry at the specified index using the raw mac interface.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ int ret = 0;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+
+ /* Request the tcam index at which this entry should be added */
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0) {
+ ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
+ if (ret != idx)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+/**
* t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
* @adap: the adapter
* @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 7e12f241145b..d0db4427b77e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -107,6 +107,7 @@ enum {
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -1479,6 +1480,169 @@ struct ulp_txpkt {
#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
+enum cpl_tx_tnl_lso_type {
+ TX_TNL_TYPE_OPAQUE,
+ TX_TNL_TYPE_NVGRE,
+ TX_TNL_TYPE_VXLAN,
+ TX_TNL_TYPE_GENEVE,
+};
+
+struct cpl_tx_tnl_lso {
+ __be32 op_to_IpIdSplitOut;
+ __be16 IpIdOffsetOut;
+ __be16 UdpLenSetOut_to_TnlHdrLen;
+ __be64 r1;
+ __be32 Flow_to_TcpHdrLen;
+ __be16 IpIdOffset;
+ __be16 IpIdSplit_to_Mss;
+ __be32 TCPSeqOffset;
+ __be32 EthLenOffset_Size;
+ /* encapsulated CPL (TX_PKT_XT) follows here */
+};
+
+#define CPL_TX_TNL_LSO_OPCODE_S 24
+#define CPL_TX_TNL_LSO_OPCODE_M 0xff
+#define CPL_TX_TNL_LSO_OPCODE_V(x) ((x) << CPL_TX_TNL_LSO_OPCODE_S)
+#define CPL_TX_TNL_LSO_OPCODE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M)
+
+#define CPL_TX_TNL_LSO_FIRST_S 23
+#define CPL_TX_TNL_LSO_FIRST_M 0x1
+#define CPL_TX_TNL_LSO_FIRST_V(x) ((x) << CPL_TX_TNL_LSO_FIRST_S)
+#define CPL_TX_TNL_LSO_FIRST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M)
+#define CPL_TX_TNL_LSO_FIRST_F CPL_TX_TNL_LSO_FIRST_V(1U)
+
+#define CPL_TX_TNL_LSO_LAST_S 22
+#define CPL_TX_TNL_LSO_LAST_M 0x1
+#define CPL_TX_TNL_LSO_LAST_V(x) ((x) << CPL_TX_TNL_LSO_LAST_S)
+#define CPL_TX_TNL_LSO_LAST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M)
+#define CPL_TX_TNL_LSO_LAST_F CPL_TX_TNL_LSO_LAST_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S 21
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M 0x1
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \
+ CPL_TX_TNL_LSO_ETHHDRLENXOUT_M)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPV6OUT_S 20
+#define CPL_TX_TNL_LSO_IPV6OUT_M 0x1
+#define CPL_TX_TNL_LSO_IPV6OUT_V(x) ((x) << CPL_TX_TNL_LSO_IPV6OUT_S)
+#define CPL_TX_TNL_LSO_IPV6OUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M)
+#define CPL_TX_TNL_LSO_IPV6OUT_F CPL_TX_TNL_LSO_IPV6OUT_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLEN_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S)
+#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLEN_S 4
+#define CPL_TX_TNL_LSO_IPHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLEN_S)
+#define CPL_TX_TNL_LSO_IPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_TCPHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TCPHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S)
+#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_MSS_S 0
+#define CPL_TX_TNL_LSO_MSS_M 0x3fff
+#define CPL_TX_TNL_LSO_MSS_V(x) ((x) << CPL_TX_TNL_LSO_MSS_S)
+#define CPL_TX_TNL_LSO_MSS_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M)
+
+#define CPL_TX_TNL_LSO_SIZE_S 0
+#define CPL_TX_TNL_LSO_SIZE_M 0xfffffff
+#define CPL_TX_TNL_LSO_SIZE_V(x) ((x) << CPL_TX_TNL_LSO_SIZE_S)
+#define CPL_TX_TNL_LSO_SIZE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_S 4
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S 3
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPLENSETOUT_S 2
+#define CPL_TX_TNL_LSO_IPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_F CPL_TX_TNL_LSO_IPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPIDINCOUT_S 1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_F CPL_TX_TNL_LSO_IPIDINCOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S 14
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \
+ CPL_TX_TNL_LSO_UDPCHKCLROUT_M)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_S 15
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \
+ CPL_TX_TNL_LSO_UDPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_F CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_TNLTYPE_S 12
+#define CPL_TX_TNL_LSO_TNLTYPE_M 0x3
+#define CPL_TX_TNL_LSO_TNLTYPE_V(x) ((x) << CPL_TX_TNL_LSO_TNLTYPE_S)
+#define CPL_TX_TNL_LSO_TNLTYPE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLEN 16
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN 0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN)
+#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN)
+
+#define CPL_TX_TNL_LSO_TNLHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TNLHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S)
+#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPV6_S 20
+#define CPL_TX_TNL_LSO_IPV6_M 0x1
+#define CPL_TX_TNL_LSO_IPV6_V(x) ((x) << CPL_TX_TNL_LSO_IPV6_S)
+#define CPL_TX_TNL_LSO_IPV6_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M)
+#define CPL_TX_TNL_LSO_IPV6_F CPL_TX_TNL_LSO_IPV6_V(1U)
+
#define ULP_TX_SC_MORE_S 23
#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U)
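
All of the CPL_TX_TNL_LSO_* definitions above follow the driver's four-macro convention: _S is the field's bit shift, _M its width mask, _V(x) packs a value into place, and _G(x) extracts it. A standalone round-trip using the same shape (the field name here is hypothetical):

#include <assert.h>
#include <stdio.h>

#define DEMO_FIELD_S	12
#define DEMO_FIELD_M	0x3
#define DEMO_FIELD_V(x)	((x) << DEMO_FIELD_S)
#define DEMO_FIELD_G(x)	(((x) >> DEMO_FIELD_S) & DEMO_FIELD_M)

int main(void)
{
	unsigned int word = 0;

	word |= DEMO_FIELD_V(2);		/* pack value 2 into bits 13:12 */
	assert(DEMO_FIELD_G(word) == 2);	/* unpack it again */
	printf("word=0x%x field=%u\n", word, DEMO_FIELD_G(word));
	return 0;
}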
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 863bc29153d9..d9c06d6dc7b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -2511,6 +2511,17 @@
#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
+#define MPS_RX_VXLAN_TYPE_A 0x11234
+
+#define VXLAN_EN_S 16
+#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S)
+#define VXLAN_EN_F VXLAN_EN_V(1U)
+
+#define VXLAN_S 0
+#define VXLAN_M 0xffffU
+#define VXLAN_V(x) ((x) << VXLAN_S)
+#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M)
+
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
@@ -2537,8 +2548,14 @@
#define DATAPORTNUM_S 12
#define DATAPORTNUM_M 0xfU
+#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S)
#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
+#define DATALKPTYPE_S 10
+#define DATALKPTYPE_M 0x3U
+#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S)
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
#define DATADIPHIT_S 8
#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
#define DATADIPHIT_F DATADIPHIT_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 427f252a9087..f3310d5b3c4c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2060,6 +2060,7 @@ struct fw_vi_cmd {
#define FW_VI_MAC_ADD_MAC 0x3FF
#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+#define FW_VI_MAC_ID_BASED_FREE 0x3FC
#define FW_CLS_TCAM_NUM_ENTRIES 336
enum fw_vi_mac_smac {
@@ -2076,6 +2077,13 @@ enum fw_vi_mac_result {
FW_VI_MAC_R_F_ACL_CHECK
};
+enum fw_vi_mac_entry_types {
+ FW_VI_MAC_TYPE_EXACTMAC,
+ FW_VI_MAC_TYPE_HASHVEC,
+ FW_VI_MAC_TYPE_RAW,
+ FW_VI_MAC_TYPE_EXACTMAC_VNI,
+};
+
struct fw_vi_mac_cmd {
__be32 op_to_viid;
__be32 freemacs_to_len16;
@@ -2087,6 +2095,13 @@ struct fw_vi_mac_cmd {
struct fw_vi_mac_hash {
__be64 hashvec;
} hash;
+ struct fw_vi_mac_raw {
+ __be32 raw_idx_pkd;
+ __be32 data0_pkd;
+ __be32 data1[2];
+ __be64 data0m_pkd;
+ __be32 data1m[2];
+ } raw;
} u;
};
@@ -2096,6 +2111,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_FREEMACS_S 31
#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_S 23
+#define FW_VI_MAC_CMD_ENTRY_TYPE_M 0x7
+#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x) ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x) \
+ (((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M)
+
#define FW_VI_MAC_CMD_HASHVECEN_S 23
#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S)
#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U)
@@ -2122,6 +2143,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_IDX_G(x) \
(((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
+#define FW_VI_MAC_CMD_RAW_IDX_S 16
+#define FW_VI_MAC_CMD_RAW_IDX_M 0xffff
+#define FW_VI_MAC_CMD_RAW_IDX_V(x) ((x) << FW_VI_MAC_CMD_RAW_IDX_S)
+#define FW_VI_MAC_CMD_RAW_IDX_G(x) \
+ (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+
#define FW_RXMODE_MTU_NO_CHG 65535
struct fw_vi_rxmode_cmd {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 14d7e673c656..129b914a434c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2619,8 +2619,8 @@ void t4vf_sge_stop(struct adapter *adapter)
int t4vf_sge_init(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
- u32 fl0 = sge_params->sge_fl_buffer_size[0];
- u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+ u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
/*
@@ -2628,9 +2628,20 @@ int t4vf_sge_init(struct adapter *adapter)
* the Physical Function Driver. Ideally we should be able to deal
* with _any_ configuration. Practice is different ...
*/
- if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
- fl0, fl1);
+ fl_small_pg, fl_large_pg);
return -EINVAL;
}
if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2642,8 +2653,8 @@ int t4vf_sge_init(struct adapter *adapter)
/*
* Now translate the adapter parameters into our internal forms.
*/
- if (fl1)
- s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
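
With the checks above in place, the free-list page order reduces to ilog2(fl_large_pg) - PAGE_SHIFT; for a 4 KB page (PAGE_SHIFT = 12) and a 64 KB large buffer that gives 16 - 12 = 4. A small sketch of the same computation, assuming 4 KB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assuming 4 KB pages */

static unsigned int ilog2(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int fl_large_pg = 65536;	/* must be a power of 2 */

	printf("fl_pg_order = %u\n", ilog2(fl_large_pg) - PAGE_SHIFT);
	return 0;
}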
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 02dd5246dfae..1a49297224ed 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -103,7 +103,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
int i;
- int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
+ int num_entries = ARRAY_SIZE(cmd_priv_map);
u32 cmd_privileges = adapter->cmd_privileges;
for (i = 0; i < num_entries; i++)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e17d10b8b041..90aa69a08922 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3489,6 +3489,10 @@ fec_probe(struct platform_device *pdev)
goto failed_regulator;
}
} else {
+ if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto failed_regulator;
+ }
fep->reg_phy = NULL;
}
@@ -3572,8 +3576,9 @@ failed_clk_ipg:
failed_clk:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
-failed_phy:
of_node_put(phy_node);
+failed_phy:
+ dev_id--;
failed_ioremap:
free_netdev(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 82e9a8034557..adec88d941df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -274,6 +274,8 @@ struct hnae3_ae_dev {
* Get firmware version
* get_mdix_mode()
* Get media type of phy
+ * enable_vlan_filter()
+ * Enable vlan filter
* set_vlan_filter()
* Set vlan filter config of Ports
* set_vf_vlan_filter()
@@ -382,6 +384,7 @@ struct hnae3_ae_ops {
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
+ void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 320ae8892a68..14c7625c6f19 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -247,6 +247,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
goto out_start_err;
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
return 0;
out_start_err:
@@ -286,6 +288,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
const struct hnae3_ae_ops *ops;
int i;
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -1101,6 +1106,11 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+ else
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+
changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -1116,11 +1126,12 @@ static int hns3_nic_set_features(struct net_device *netdev,
return 0;
}
-static void
-hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+static void hns3_nic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *handle = priv->ae_handle;
struct hns3_enet_ring *ring;
unsigned int start;
unsigned int idx;
@@ -1128,6 +1139,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
u64 rx_bytes = 0;
u64 tx_pkts = 0;
u64 rx_pkts = 0;
+ u64 tx_drop = 0;
+ u64 rx_drop = 0;
+
+ if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ handle->ae_algo->ops->update_stats(handle, &netdev->stats);
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
@@ -1136,6 +1154,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
+ tx_drop += ring->stats.tx_busy;
+ tx_drop += ring->stats.sw_err_cnt;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1144,6 +1164,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
+ rx_drop += ring->stats.non_vld_descs;
+ rx_drop += ring->stats.err_pkt_len;
+ rx_drop += ring->stats.l2_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
@@ -1159,8 +1182,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
stats->tx_errors = netdev->stats.tx_errors;
- stats->rx_dropped = netdev->stats.rx_dropped;
- stats->tx_dropped = netdev->stats.tx_dropped;
+ stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
+ stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1390,6 +1413,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+ netdev->mtu = new_mtu;
+
/* if the netdev was running earlier, bring it up again */
if (if_running && hns3_nic_net_open(netdev))
ret = -EINVAL;
@@ -1549,6 +1574,8 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1577,12 +1604,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (!(h->flags & HNAE3_SUPPORT_VF))
+ netdev->hw_features |=
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
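
hns3_nic_get_stats64() reads each ring's 64-bit counters inside a u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop, so a writer updating the counters mid-read forces the reader to retry. A userspace model of that seqcount idea follows; it is single-threaded, so the retry never actually fires, and the kernel version additionally inserts memory barriers:

#include <stdint.h>
#include <stdio.h>

struct ring_stats { unsigned int seq; uint64_t pkts, bytes; };

static unsigned int fetch_begin(const struct ring_stats *s)
{
	return s->seq;	/* kernel version also issues barriers */
}

static int fetch_retry(const struct ring_stats *s, unsigned int start)
{
	/* Odd sequence = write in progress; changed = write happened. */
	return (start & 1) || s->seq != start;
}

int main(void)
{
	struct ring_stats s = { .seq = 2, .pkts = 10, .bytes = 1500 };
	uint64_t pkts, bytes;
	unsigned int start;

	do {
		start = fetch_begin(&s);
		pkts = s.pkts;
		bytes = s.bytes;
	} while (fetch_retry(&s, start));

	printf("pkts=%llu bytes=%llu\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}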
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 379c01dddbf8..d3cb3ec2c62b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -15,26 +15,25 @@
struct hns3_stats {
char stats_string[ETH_GSTRING_LEN];
- int stats_size;
int stats_offset;
};
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
- .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \
- .stats_offset = offsetof(struct hns3_enet_ring, stats), \
-} \
+ .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+ offsetof(struct ring_stats, _member), \
+}
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
- HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("tx_pkts", tx_pkts),
- HNS3_TQP_STAT("tx_bytes", tx_bytes),
- HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
- HNS3_TQP_STAT("tx_restart_queue", restart_queue),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", tx_pkts),
+ HNS3_TQP_STAT("bytes", tx_bytes),
+ HNS3_TQP_STAT("errors", tx_err_cnt),
+ HNS3_TQP_STAT("tx_wake", restart_queue),
HNS3_TQP_STAT("tx_busy", tx_busy),
};
@@ -42,18 +41,18 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
- HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("rx_pkts", rx_pkts),
- HNS3_TQP_STAT("rx_bytes", rx_bytes),
- HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
- HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
- HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
- HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
- HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
- HNS3_TQP_STAT("rx_l2_err", l2_err),
- HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", rx_pkts),
+ HNS3_TQP_STAT("bytes", rx_bytes),
+ HNS3_TQP_STAT("errors", rx_err_cnt),
+ HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
+ HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
+ HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
+ HNS3_TQP_STAT("err_bd_num", err_bd_num),
+ HNS3_TQP_STAT("l2_err", l2_err),
+ HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -389,9 +388,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
}
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
- u32 stat_count, u32 num_tqps)
+ u32 stat_count, u32 num_tqps, const char *prefix)
{
-#define MAX_PREFIX_SIZE (8 + 4)
+#define MAX_PREFIX_SIZE (6 + 4)
u32 size_left;
u32 i, j;
u32 n1;
@@ -401,7 +400,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
+ n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
+ prefix, i);
n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -417,14 +417,16 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ const char tx_prefix[] = "txq";
+ const char rx_prefix[] = "rxq";
/* get strings for Tx */
data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, tx_prefix);
/* get strings for Rx */
data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, rx_prefix);
return data;
}
@@ -455,13 +457,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hns3_enet_ring *ring;
u8 *stat;
- u32 i;
+ int i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i].ring;
- for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
@@ -469,8 +471,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Rx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
- for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
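
The reworked HNS3_TQP_STAT macro folds the ring offset and the member offset into one stats_offset, so the ethtool fetch loop can read every counter with a single byte-offset dereference. The same pattern standalone:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats { uint64_t tx_pkts, tx_bytes; };
struct ring { int id; struct ring_stats stats; };

struct stat_desc {
	const char *name;
	size_t offset;	/* offset of the u64 within struct ring */
};

#define RING_STAT(n, member) \
	{ .name = n, .offset = offsetof(struct ring, stats) + \
			       offsetof(struct ring_stats, member) }

static const struct stat_desc descs[] = {
	RING_STAT("packets", tx_pkts),
	RING_STAT("bytes", tx_bytes),
};

int main(void)
{
	struct ring r = { .stats = { .tx_pkts = 42, .tx_bytes = 6400 } };
	size_t i;

	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
		uint64_t v = *(uint64_t *)((uint8_t *)&r + descs[i].offset);
		printf("%s = %llu\n", descs[i].name, (unsigned long long)v);
	}
	return 0;
}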
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index f5baba216e48..3c3159b2d3bf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -556,8 +556,6 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
-#define HCLGE_MAC_MIN_MTU 64
-#define HCLGE_MAC_MAX_MTU 9728
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 0874acf5ef39..d7352f5f75c3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -17,6 +17,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -35,6 +36,7 @@
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
enum hclge_mta_dmac_sel_type mta_mac_sel,
bool enable);
+static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
@@ -279,8 +281,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
{"mac_tx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
+ {"mac_tx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
{"mac_tx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
{"mac_tx_65_127_oct_pkt_num",
@@ -293,8 +295,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
{"mac_tx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
+ {"mac_tx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+ {"mac_tx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+ {"mac_tx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+ {"mac_tx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
+ {"mac_tx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+ {"mac_tx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+ {"mac_tx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+ {"mac_tx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+ {"mac_tx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
{"mac_rx_total_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
{"mac_rx_total_oct_num",
@@ -315,8 +333,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
{"mac_rx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
+ {"mac_rx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
{"mac_rx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
{"mac_rx_65_127_oct_pkt_num",
@@ -329,33 +347,49 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
{"mac_rx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
-
- {"mac_trans_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
- {"mac_trans_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
- {"mac_trans_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
- {"mac_trans_err_all_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
- {"mac_trans_from_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
- {"mac_trans_from_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
- {"mac_rcv_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
- {"mac_rcv_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
- {"mac_rcv_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
- {"mac_rcv_fcs_err_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
- {"mac_rcv_send_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
- {"mac_rcv_send_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
+ {"mac_rx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+ {"mac_rx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+ {"mac_rx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+ {"mac_rx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
+ {"mac_rx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+ {"mac_rx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+ {"mac_rx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+ {"mac_rx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+ {"mac_rx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
+
+ {"mac_tx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+ {"mac_tx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+ {"mac_tx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+ {"mac_tx_err_all_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+ {"mac_tx_from_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+ {"mac_tx_from_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+ {"mac_rx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+ {"mac_rx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+ {"mac_rx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+ {"mac_rx_fcs_err_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+ {"mac_rx_send_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+ {"mac_rx_send_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
@@ -463,7 +497,7 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
-#define HCLGE_MAC_CMD_NUM 17
+#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
@@ -525,7 +559,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -545,7 +579,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
return 0;
@@ -587,7 +621,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -595,7 +629,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -643,23 +677,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
- net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
- net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->rx_length_errors =
hw_stats->mac_stats.mac_rx_undersize_pkt_num;
net_stats->rx_length_errors +=
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_over_errors =
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -699,6 +732,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
int status;
+ if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return;
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -724,6 +760,8 @@ static void hclge_update_stats(struct hnae3_handle *handle,
status);
hclge_update_netstat(hw_stats, net_stats);
+
+ clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
@@ -2203,8 +2241,11 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
static int hclge_mac_init(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ int mtu;
int ret;
ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2238,11 +2279,25 @@ static int hclge_mac_init(struct hclge_dev *hdev)
}
ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"set default mac_vlan_mask fail ret=%d\n", ret);
+ return ret;
+ }
- return ret;
+ if (netdev)
+ mtu = netdev->mtu;
+ else
+ mtu = ETH_DATA_LEN;
+
+ ret = hclge_set_mtu(handle, mtu);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
@@ -2381,6 +2436,7 @@ static void hclge_service_timer(struct timer_list *t)
struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ hdev->hw_stats.stats_timer++;
hclge_task_schedule(hdev);
}
@@ -2780,9 +2836,13 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
+ if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
+ hclge_update_stats_for_all(hdev);
+ hdev->hw_stats.stats_timer = 0;
+ }
+
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
- hclge_update_stats_for_all(hdev);
hclge_service_complete(hdev);
}
@@ -4197,6 +4257,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
/* mac addr check */
if (is_zero_ether_addr(new_addr) ||
@@ -4208,14 +4269,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
return -EINVAL;
}
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "remove old uc mac address fail, ret =%d.\n",
+ ret);
- if (!hclge_add_uc_addr(handle, new_addr)) {
- ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
- return 0;
+ ret = hclge_add_uc_addr(handle, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add uc mac address fail, ret =%d.\n",
+ ret);
+
+ ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore uc mac address fail, ret =%d.\n",
+ ret);
+ }
+
+ return -EIO;
}
- return -EIO;
+ ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "configure mac pause address fail, ret =%d.\n",
+ ret);
+ return -EIO;
+ }
+
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+
+ return 0;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
@@ -4241,6 +4327,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
return 0;
}
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+
+static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+}
+
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
@@ -4469,8 +4566,6 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
-#define HCLGE_FILTER_TYPE_VF 0
-#define HCLGE_FILTER_TYPE_PORT 1
#define HCLGE_DEF_VLAN_TYPE 0x8100
struct hnae3_handle *handle;
@@ -4542,16 +4637,21 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
struct hclge_config_max_frm_size_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
+ int max_frm_size;
int ret;
- if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
return -EINVAL;
- hdev->mps = new_mtu;
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(new_mtu);
+ req->max_frm_size = cpu_to_le16(max_frm_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -4559,6 +4659,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
return ret;
}
+ hdev->mps = max_frm_size;
+
return 0;
}
@@ -4689,22 +4791,19 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
- enum hclge_fc_mode fc_mode;
int ret;
if (rx_en && tx_en)
- fc_mode = HCLGE_FC_FULL;
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
else if (rx_en && !tx_en)
- fc_mode = HCLGE_FC_RX_PAUSE;
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
else if (!rx_en && tx_en)
- fc_mode = HCLGE_FC_TX_PAUSE;
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
else
- fc_mode = HCLGE_FC_NONE;
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
- if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
- hdev->fc_mode_last_time = fc_mode;
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
return 0;
- }
ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
if (ret) {
@@ -4713,7 +4812,7 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
return ret;
}
- hdev->tm_info.fc_mode = fc_mode;
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
return 0;
}
@@ -5482,6 +5581,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_sset_count = hclge_get_sset_count,
.get_fw_version = hclge_get_fw_version,
.get_mdix_mode = hclge_get_mdix_mode,
+ .enable_vlan_filter = hclge_enable_vlan_filter,
.set_vlan_filter = hclge_set_port_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
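Note: hclge_set_mtu() now validates and programs the full frame size rather than the raw MTU, and hclge_mac_init() programs the MTU through the same path. A worked example of the conversion, with the standard kernel constants reproduced so the demo is self-contained; for the default 1500-byte MTU the result is 1522, i.e. HCLGE_MAC_DEFAULT_FRAME:

#include <stdio.h>

#define ETH_HLEN	14	/* dst MAC + src MAC + ethertype */
#define ETH_FCS_LEN	4	/* frame check sequence */
#define VLAN_HLEN	4	/* one 802.1Q tag */

int main(void)
{
	int mtu = 1500;		/* ETH_DATA_LEN */
	int max_frm_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	/* The hardware accepts HCLGE_MAC_MIN_FRAME..HCLGE_MAC_MAX_FRAME,
	 * i.e. 64..9728 bytes; 1500 maps to 1522.
	 */
	printf("mtu %d -> max frame %d\n", mtu, max_frm_size);
	return 0;
}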
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 28cc063306c4..eeb6c8d66e4e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -101,6 +101,11 @@
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+#define HCLGE_MAC_DEFAULT_FRAME \
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define HCLGE_MAC_MIN_FRAME 64
+#define HCLGE_MAC_MAX_FRAME 9728
+
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
HCLGE_STATE_DOWN,
@@ -112,6 +117,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_RST_HANDLING,
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
+ HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_MAX
};
@@ -381,14 +387,23 @@ struct hclge_mac_stats {
u64 mac_tx_multi_pkt_num;
u64 mac_tx_broad_pkt_num;
u64 mac_tx_undersize_pkt_num;
- u64 mac_tx_overrsize_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
u64 mac_tx_64_oct_pkt_num;
u64 mac_tx_65_127_oct_pkt_num;
u64 mac_tx_128_255_oct_pkt_num;
u64 mac_tx_256_511_oct_pkt_num;
u64 mac_tx_512_1023_oct_pkt_num;
u64 mac_tx_1024_1518_oct_pkt_num;
- u64 mac_tx_1519_max_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_good_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_oct_pkt_num;
+
u64 mac_rx_total_pkt_num;
u64 mac_rx_total_oct_num;
u64 mac_rx_good_pkt_num;
@@ -399,33 +414,43 @@ struct hclge_mac_stats {
u64 mac_rx_multi_pkt_num;
u64 mac_rx_broad_pkt_num;
u64 mac_rx_undersize_pkt_num;
- u64 mac_rx_overrsize_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
u64 mac_rx_64_oct_pkt_num;
u64 mac_rx_65_127_oct_pkt_num;
u64 mac_rx_128_255_oct_pkt_num;
u64 mac_rx_256_511_oct_pkt_num;
u64 mac_rx_512_1023_oct_pkt_num;
u64 mac_rx_1024_1518_oct_pkt_num;
- u64 mac_rx_1519_max_oct_pkt_num;
-
- u64 mac_trans_fragment_pkt_num;
- u64 mac_trans_undermin_pkt_num;
- u64 mac_trans_jabber_pkt_num;
- u64 mac_trans_err_all_pkt_num;
- u64 mac_trans_from_app_good_pkt_num;
- u64 mac_trans_from_app_bad_pkt_num;
- u64 mac_rcv_fragment_pkt_num;
- u64 mac_rcv_undermin_pkt_num;
- u64 mac_rcv_jabber_pkt_num;
- u64 mac_rcv_fcs_err_pkt_num;
- u64 mac_rcv_send_app_good_pkt_num;
- u64 mac_rcv_send_app_bad_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_good_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_oct_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
};
+#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
struct hclge_hw_stats {
struct hclge_mac_stats mac_stats;
struct hclge_64_bit_stats all_64_bit_stats;
struct hclge_32_bit_stats all_32_bit_stats;
+ u32 stats_timer;
};
struct hclge_vlan_type_cfg {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index ea9355d82560..36bd79a77940 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+ u8 pause_trans_gap, u16 pause_trans_time)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+
+ ether_addr_copy(pause_param->mac_addr, addr);
+ pause_param->pause_trans_gap = pause_trans_gap;
+ pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ u16 trans_time;
+ u8 trans_gap;
+ int ret;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ trans_gap = pause_param->pause_trans_gap;
+ trans_time = le16_to_cpu(pause_param->pause_trans_time);
+
+ return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap,
+ trans_time);
+}
+
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
@@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
return hclge_tm_schd_mode_hw(hdev);
}
+static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ return hclge_mac_pause_param_cfg(hdev, mac->mac_addr,
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+}
+
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
u8 enable_bitmap = 0;
@@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
int ret;
u8 i;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_mac_pause_param_setup_hw(hdev);
+ }
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 16f413956f17..5401e7559437 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -18,6 +18,9 @@
#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
+
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
@@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd {
u8 pri_en_bitmap;
};
+struct hclge_cfg_pause_param_cmd {
+ u8 mac_addr[ETH_ALEN];
+ u8 pause_trans_gap;
+ u8 rsvd;
+ __le16 pause_trans_time;
+};
+
struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
};
@@ -119,4 +129,5 @@ int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
#endif
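Note: hclge_mac_pause_addr_cfg() above is a read-modify-write over the HCLGE_OPC_CFG_MAC_PARA command: it reads the current pause parameters, then writes them back with only the MAC address replaced. A user-space sketch checking the 10-byte payload implied by the new hclge_cfg_pause_param_cmd layout (the natural packing shown is an assumption; the driver depends on the same layout matching the firmware ABI):

#include <assert.h>
#include <stdint.h>

struct pause_param_cmd_sketch {
	uint8_t  mac_addr[6];		/* station address for pause frames */
	uint8_t  pause_trans_gap;
	uint8_t  rsvd;
	uint16_t pause_trans_time;	/* __le16 on the wire */
};

int main(void)
{
	/* 6 + 1 + 1 + 2 bytes, no padding under natural alignment */
	static_assert(sizeof(struct pause_param_cmd_sketch) == 10,
		      "pause param command payload must be 10 bytes");
	return 0;
}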
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 31866057bc7d..655f522e44aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -49,7 +49,7 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
return status;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[4]);
+ le32_to_cpu(desc.data[1]);
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
true);
@@ -63,7 +63,7 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
return status;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[4]);
+ le32_to_cpu(desc.data[1]);
}
return 0;
@@ -105,7 +105,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < hdev->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -113,7 +113,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < hdev->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -1288,7 +1288,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
hw->hdev = hdev;
- hw->io_base = pci_iomap(pdev, 2, 0);;
+ hw->io_base = pci_iomap(pdev, 2, 0);
if (!hw->io_base) {
dev_err(&pdev->dev, "can't map configuration register space\n");
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6911b7cc06c5..461014b7ccdd 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2453,6 +2453,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
struct ibmvnic_sub_crq_queue *scrq = instance;
struct ibmvnic_adapter *adapter = scrq->adapter;
+ /* When booting a kdump kernel we can hit pending interrupts
+ * prior to completing driver initialization.
+ */
+ if (unlikely(adapter->state != VNIC_OPEN))
+ return IRQ_NONE;
+
adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
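Note: the ibmvnic hunk guards against interrupts that arrive before the adapter reaches VNIC_OPEN, as can happen when a kdump kernel inherits a pending interrupt. A self-contained sketch of the same guard shape (all names illustrative):

#include <stdio.h>

enum demo_state { DEMO_PROBING, DEMO_OPEN };
enum demo_irqreturn { DEMO_IRQ_NONE, DEMO_IRQ_HANDLED };

struct demo_adapter { enum demo_state state; };

/* Refuse work until initialization is complete, mirroring the fix. */
static enum demo_irqreturn demo_rx_irq(struct demo_adapter *adapter)
{
	if (adapter->state != DEMO_OPEN)
		return DEMO_IRQ_NONE;	/* early interrupt, e.g. kdump boot */
	return DEMO_IRQ_HANDLED;	/* normally: schedule NAPI here */
}

int main(void)
{
	struct demo_adapter a = { .state = DEMO_PROBING };

	printf("early irq handled? %d\n", demo_rx_irq(&a) == DEMO_IRQ_HANDLED);
	a.state = DEMO_OPEN;
	printf("after open handled? %d\n", demo_rx_irq(&a) == DEMO_IRQ_HANDLED);
	return 0;
}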
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index d7bdea79e9fa..8fd2458060a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
- __E1000_DOWN
+ __E1000_DOWN,
+ __E1000_DISABLED
};
#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3b3983a1ffbb..dc71e87c3260 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1838,8 +1838,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
p = (char *)adapter + stat->stat_offset;
break;
default:
- WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
- stat->type, i);
+ netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n",
+ stat->type, i);
continue;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1982f7917a8d..3dd4aeb2706d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
- struct e1000_adapter *adapter;
+ struct e1000_adapter *adapter = NULL;
struct e1000_hw *hw;
static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
+ bool disable_dev = false;
/* do not allocate ioport bars when not needed */
need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
- pci_disable_device(pdev);
+ if (!adapter || disable_dev)
+ pci_disable_device(pdev);
return err;
}
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ bool disable_dev;
e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev, adapter->bars);
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
- pci_disable_device(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
}
/**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
return 0;
}
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
pr_err("Cannot enable PCI device from suspend\n");
return err;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
e1000_down(adapter);
- pci_disable_device(pdev);
+
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
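Note: the e1000 changes introduce an __E1000_DISABLED flag so pci_disable_device() runs exactly once even when remove, shutdown, and error-recovery paths overlap. A user-space sketch of the test_and_set_bit() idiom they rely on (C11 atomics stand in for the kernel bitops):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag disabled = ATOMIC_FLAG_INIT;

/* Only the caller that flips the flag from clear to set disables the
 * device; every later caller sees it already set and skips.
 */
static void maybe_disable_device(const char *path)
{
	if (!atomic_flag_test_and_set(&disabled))
		printf("%s: disabling device\n", path);
	else
		printf("%s: already disabled, skipping\n", path);
}

int main(void)
{
	maybe_disable_device("remove");
	maybe_disable_device("shutdown");	/* raced; becomes a no-op */
	return 0;
}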
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d6d4ed7acf03..31277d3bb7dc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,6 +1367,9 @@ out:
* Checks to see of the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
+ *
+ * Returns a negative error code (-E1000_ERR_*), 0 (link down), or
+ * 1 (link up).
**/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
- return 0;
+ return 1;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val)
+ if (ret_val) {
e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
- return ret_val;
+ return 1;
}
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
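Note: e1000_check_for_copper_link_ich8lan() now distinguishes "no error" from "no link": negative means error, 0 means link down, 1 means link up. A hypothetical caller showing how the tri-state return must be interpreted (the real callers live in e1000e's watchdog path):

#include <stdio.h>

static void demo_handle_link(int link)
{
	if (link < 0)
		printf("link check failed: %d\n", link);	/* -E1000_ERR_* */
	else if (link)
		printf("link is up\n");
	else
		printf("link is down\n");
}

int main(void)
{
	demo_handle_link(-2);	/* some error code */
	demo_handle_link(0);
	demo_handle_link(1);
	return 0;
}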
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b0188b8f91ba..c5776340517c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -198,7 +198,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
@@ -1594,7 +1594,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1605,7 +1605,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1614,8 +1614,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 095965f268bd..40c5f7628aa1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -5236,7 +5236,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -5246,7 +5246,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -5255,7 +5255,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -5271,7 +5271,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -5282,14 +5282,14 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -5364,11 +5364,6 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -5378,7 +5373,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -5397,7 +5392,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -5439,10 +5434,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index cfd788b4fd7a..34173f821fd9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -126,6 +126,10 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx),
+ I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx),
+ I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx),
+ I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx),
I40E_PF_STAT("rx_size_64", stats.rx_size_64),
I40E_PF_STAT("rx_size_127", stats.rx_size_127),
I40E_PF_STAT("rx_size_255", stats.rx_size_255),
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 321d8be80871..2ab22eba0c7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -47,8 +47,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+ /* Copy the address first, so that we avoid a possible race with
+ * .set_rx_mode(). If we copy after changing the address in the filter
+ * list, we might open ourselves to a narrow race window where
+ * .set_rx_mode could delete our dev_addr filter and prevent traffic
+ * from passing.
+ */
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
@@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ /* Under some circumstances, we might receive a request to delete
+ * our own device address from our uc list. Because we store the
+ * device address in the VSI's MAC/VLAN filter list, we need to ignore
+ * such requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
i40e_del_mac_filter(vsi, addr);
return 0;
@@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
/* Set Bit 7 to be valid */
mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
- /* Set L4type to both TCP and UDP support */
- mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
+ /* Set L4type for TCP support */
+ mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
/* Set cloud filter mode */
mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_valid_ether_addr(filter->src_mac)) ||
(is_multicast_ether_addr(filter->dst_mac) &&
is_multicast_ether_addr(filter->src_mac)))
- return -EINVAL;
+ return -EOPNOTSUPP;
- /* Make sure port is specified, otherwise bail out, for channel
- * specific cloud filter needs 'L4 port' to be non-zero
+ /* Big buffer cloud filters need 'L4 port' to be non-zero. Also, UDP
+ * ports are not supported via the big buffer path for now.
*/
- if (!filter->dst_port)
- return -EINVAL;
+ if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
+ return -EOPNOTSUPP;
/* adding filter using src_port/src_ip is not supported at this stage */
if (filter->src_port || filter->src_ipv4 ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_multicast_ether_addr(filter->src_mac)) {
/* MAC + IP : unsupported mode */
if (filter->dst_ipv4)
- return -EINVAL;
+ return -EOPNOTSUPP;
/* since we validated that L4 port must be valid before
* we get here, start with respective "flags" value
@@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
if (tc < 0) {
dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
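Note: the i40e_set_mac() hunk reorders the dev_addr copy ahead of the filter-list update so a concurrent .set_rx_mode() sync, which compares against netdev->dev_addr, can never delete the filter for the address being installed. A user-space sketch of the publish-before-update ordering (all names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char dev_addr[6];
static unsigned char filter[6];	/* stand-in for the MAC filter list */

static void demo_set_mac(const unsigned char *new_addr)
{
	memcpy(dev_addr, new_addr, 6);		/* step 1: publish */

	pthread_mutex_lock(&filter_lock);	/* step 2: update filters */
	memcpy(filter, new_addr, 6);
	pthread_mutex_unlock(&filter_lock);
}

int main(void)
{
	static const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };

	demo_set_mac(mac);
	printf("dev_addr %02x:..:%02x\n", dev_addr[0], dev_addr[5]);
	return 0;
}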
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7689c2ee0d46..425713fb72e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -389,7 +389,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
i40e_status ret_code;
- u16 read_size = *words;
+ u16 read_size;
bool last_cmd = false;
u16 words_read = 0;
u16 i = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3bb6659db822..b3cc89cc3a86 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -343,6 +343,37 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
return i40e_ptype_lookup[ptype];
}
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+static inline enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
+
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
@@ -400,13 +431,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2a8a85e3ae8f..40edb6e5e6f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3061,10 +3061,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -3072,7 +3092,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
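Note: __i40e_chk_linearize() now peels a multi-descriptor stale fragment instead of subtracting its full size at once: the alignment padding at the fragment head comes off first, then I40E_MAX_DATA_PER_TXD_ALIGNED-sized chunks, leaving only the final descriptor's remainder for the later per-iteration subtraction. A worked example (the two constants are reproduced from i40e_txrx.h so the demo stands alone):

#include <stdio.h>

#define MAX_READ_REQ_SIZE	4096
#define MAX_DATA_PER_TXD	(16 * 1024 - 1)			/* 16383 */
#define MAX_DATA_PER_TXD_ALIGNED \
	(MAX_DATA_PER_TXD & ~(MAX_READ_REQ_SIZE - 1))		/* 12288 */

int main(void)
{
	int stale_size = 32768;	/* hypothetical 32 KiB fragment */
	int page_offset = 0;
	int align_pad = -page_offset & (MAX_READ_REQ_SIZE - 1);
	int peeled = align_pad;

	stale_size -= align_pad;
	while (stale_size > MAX_DATA_PER_TXD) {
		stale_size -= MAX_DATA_PER_TXD_ALIGNED;
		peeled += MAX_DATA_PER_TXD_ALIGNED;
	}

	/* 32768 bytes: peel 2 * 12288 up front, 8192 stays for the
	 * fragment's last descriptor.
	 */
	printf("peeled %d now, %d left for the last descriptor\n",
	       peeled, stale_size);
	return 0;
}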
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 0e8568719b4e..5a708c363d99 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1502,19 +1502,19 @@ struct i40e_lldp_variables {
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1526,16 +1526,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1545,8 +1545,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1573,11 +1573,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 36cb8e068e85..e9309fb9084b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -81,12 +81,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
- (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
} else {
pfe.event_data.link_event.link_status =
ls->link_info & I40E_AQ_LINK_UP;
pfe.event_data.link_event.link_speed =
- (enum virtchnl_link_speed)ls->link_speed;
+ i40e_virtchnl_link_speed(ls->link_speed);
}
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
@@ -2749,6 +2749,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
ret = i40e_vc_get_vf_resources_msg(vf, msg);
+ i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 06b04572c518..435a112d09f5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -198,7 +198,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
@@ -1562,7 +1562,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1573,7 +1573,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1582,8 +1582,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7d70bf69b249..a94648429a5b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1202,7 +1202,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
}
/**
- * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -1212,7 +1212,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -1221,7 +1221,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
@@ -1237,7 +1237,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -1248,16 +1248,16 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
@@ -1330,11 +1330,6 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -1344,7 +1339,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -1363,7 +1358,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -1405,10 +1400,10 @@ i40evf_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index b624b5994075..47c429931a57 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -131,13 +131,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *cmd_details);
struct i40e_generic_seg_header *
i40evf_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 50864f99446d..1ba29bb85b67 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 213b773dfad6..6afc31616e04 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1422,19 +1422,19 @@ enum i40e_reset_type {
#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
I40E_FD_INSET_FLEX_WORD57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1446,16 +1446,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1465,8 +1465,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1493,11 +1493,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index de0af521d602..47040ab2e298 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -199,6 +199,9 @@ struct i40evf_adapter {
wait_queue_head_t down_waitqueue;
struct i40e_q_vector *q_vectors;
struct list_head vlan_filter_list;
+ struct list_head mac_filter_list;
+ /* Lock to protect accesses to MAC and VLAN lists */
+ spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
int num_active_queues;
int num_req_queues;
@@ -206,7 +209,6 @@ struct i40evf_adapter {
/* TX */
struct i40e_ring *tx_rings;
u32 tx_timeout_count;
- struct list_head mac_filter_list;
u32 tx_desc_count;
/* RX */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7b2a4eba92e2..f92587aba3c7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -45,8 +45,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -276,37 +276,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
if (mask & BIT(i - 1)) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
- }
- }
-}
-
-/**
- * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
- * @adapter: board private structure
- * @mask: bitmap of vectors to trigger
- **/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
-{
- struct i40e_hw *hw = &adapter->hw;
- int i;
- u32 dyn_ctl;
-
- if (mask & 1) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
- }
- for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & BIT(i)) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
}
@@ -337,15 +307,10 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
struct net_device *netdev = data;
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
- u32 val;
/* handle non-queue interrupts, these reads clear the registers */
- val = rd32(hw, I40E_VFINT_ICR01);
- val = rd32(hw, I40E_VFINT_ICR0_ENA1);
-
- val = rd32(hw, I40E_VFINT_DYN_CTL01) |
- I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, val);
+ rd32(hw, I40E_VFINT_ICR01);
+ rd32(hw, I40E_VFINT_ICR0_ENA1);
/* schedule work on the private workqueue */
schedule_work(&adapter->adminq_task);
@@ -706,7 +671,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
* @adapter: board private structure
* @vlan: vlan tag
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
@@ -731,14 +697,8 @@ static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f = NULL;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- goto out;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (!f) {
@@ -755,8 +715,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
}
clearout:
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-out:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -768,21 +727,16 @@ out:
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
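
This conversion repeats throughout the series: each open-coded
test_and_set_bit()/udelay() spin on __I40EVF_IN_CRITICAL_TASK around the
filter lists becomes a spin_lock_bh()/spin_unlock_bh() pair, which cannot
silently give up after 50 tries the way the busy-wait could. A minimal sketch
of the resulting pattern (kernel-style C; the demo_* names are hypothetical,
not the driver's structures):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_filter {
	struct list_head list;
	bool remove;
};

struct demo_adapter {
	spinlock_t list_lock;		/* protects filter_list */
	struct list_head filter_list;
};

static void demo_mark_all_for_removal(struct demo_adapter *adapter)
{
	struct demo_filter *f;

	/* _bh variant: the lists are also reached from softirq-safe
	 * paths, so bottom halves must be blocked while the lock is held.
	 */
	spin_lock_bh(&adapter->list_lock);
	list_for_each_entry(f, &adapter->filter_list, list)
		f->remove = true;
	spin_unlock_bh(&adapter->list_lock);
}
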
/**
@@ -824,7 +778,8 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
* @adapter: board private structure
* @macaddr: the MAC address
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
@@ -854,26 +809,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
u8 *macaddr)
{
struct i40evf_mac_filter *f;
- int count = 50;
if (!macaddr)
return NULL;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return NULL;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_filter(adapter, macaddr);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f) {
- clear_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section);
- return NULL;
- }
+ if (!f)
+ goto clearout;
ether_addr_copy(f->macaddr, macaddr);
@@ -884,7 +830,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
f->remove = false;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+clearout:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -911,12 +858,16 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
return -EPERM;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -937,7 +888,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
- int count = 50;
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
@@ -947,16 +897,8 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
i40evf_add_filter(adapter, mca->addr);
}
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0) {
- dev_err(&adapter->pdev->dev,
- "Failed to get lock in %s\n", __func__);
- return;
- }
- }
- /* remove filter if not in netdev list */
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
netdev_for_each_mc_addr(mca, netdev)
if (ether_addr_equal(mca->addr, f->macaddr))
@@ -995,7 +937,7 @@ bottom_of_search_loop:
adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
@@ -1058,6 +1000,8 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
/**
* i40evf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
@@ -1075,6 +1019,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
/**
* i40e_down - Shutdown the connection processing
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
void i40evf_down(struct i40evf_adapter *adapter)
{
@@ -1084,16 +1030,14 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
- usleep_range(500, 1000);
-
netif_carrier_off(netdev);
netif_tx_disable(netdev);
adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -1102,6 +1046,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
f->remove = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
@@ -1115,7 +1062,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
@@ -1770,13 +1716,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
watchdog_done:
- if (adapter->state == __I40EVF_RUNNING) {
- i40evf_irq_enable_queues(adapter, ~0);
- i40evf_fire_sw_int(adapter, 0xFF);
- } else {
- i40evf_fire_sw_int(adapter, 0x1);
- }
-
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
@@ -1796,7 +1735,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ if (adapter->state == __I40EVF_RUNNING) {
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
@@ -1808,6 +1751,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
i40evf_free_all_rx_resources(adapter);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* Delete all of the filters, both MAC and VLAN. */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
list_del(&f->list);
@@ -1819,6 +1764,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
kfree(fv);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1854,6 +1801,7 @@ static void i40evf_reset_task(struct work_struct *work)
struct i40evf_mac_filter *f;
u32 reg_val;
int i = 0, err;
+ bool running;
while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
@@ -1913,7 +1861,13 @@ static void i40evf_reset_task(struct work_struct *work)
}
continue_reset:
- if (netif_running(netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ running = (adapter->state == __I40EVF_RUNNING);
+
+ if (running) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -1948,6 +1902,8 @@ continue_reset:
adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
@@ -1956,15 +1912,19 @@ continue_reset:
list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
vlf->add = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
- clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
- if (netif_running(adapter->netdev)) {
+ /* We were running when the reset started, so we need to restore some
+ * state here.
+ */
+ if (running) {
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
if (err)
@@ -1993,9 +1953,13 @@ continue_reset:
adapter->state = __I40EVF_DOWN;
wake_up(&adapter->down_waitqueue);
}
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return;
reset_err:
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(netdev);
}
@@ -2239,8 +2203,14 @@ static int i40evf_open(struct net_device *netdev)
return -EIO;
}
- if (adapter->state != __I40EVF_DOWN)
- return -EBUSY;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
+ if (adapter->state != __I40EVF_DOWN) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
@@ -2264,6 +2234,8 @@ static int i40evf_open(struct net_device *netdev)
i40evf_irq_enable(adapter, true);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
return 0;
err_req_irq:
@@ -2273,6 +2245,8 @@ err_setup_rx:
i40evf_free_all_rx_resources(adapter);
err_setup_tx:
i40evf_free_all_tx_resources(adapter);
+err_unlock:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return err;
}
@@ -2296,6 +2270,9 @@ static int i40evf_close(struct net_device *netdev)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return 0;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
@@ -2305,6 +2282,8 @@ static int i40evf_close(struct net_device *netdev)
adapter->state = __I40EVF_DOWN_PENDING;
i40evf_free_traffic_irqs(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
* i40evf_virtchnl_completion() after we get confirmation from the PF
@@ -2943,6 +2922,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
+ spin_lock_init(&adapter->mac_vlan_list_lock);
+
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2985,6 +2966,10 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(netdev);
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
if (netif_running(netdev)) {
rtnl_lock();
i40evf_down(adapter);
@@ -2993,6 +2978,8 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
retval = pci_save_state(pdev);
if (retval)
return retval;
@@ -3118,6 +3105,7 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
kfree(adapter->vf_res);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
/* If we got removed before an up/down sequence, we've got a filter
* hanging out there that we need to get rid of.
*/
@@ -3130,6 +3118,8 @@ static void i40evf_remove(struct pci_dev *pdev)
kfree(f);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 46c8b8a3907c..feb95b62a077 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -433,12 +433,16 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
@@ -456,8 +460,10 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -472,6 +478,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
@@ -498,12 +507,16 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
@@ -520,8 +533,10 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -537,6 +552,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
@@ -564,12 +582,15 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
@@ -586,8 +607,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -602,6 +625,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -628,12 +654,15 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
@@ -650,8 +679,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -667,6 +698,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -705,8 +739,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
}
if (!flags) {
- adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
+ I40EVF_FLAG_ALLMULTI_ON);
+ adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
+ I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8611763d6129..03a4df0bed96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -333,7 +333,6 @@ struct ixgbe_ring {
struct net_device *netdev; /* netdev ring belongs to */
struct bpf_prog *xdp_prog;
struct device *dev; /* device for DMA mapping */
- struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -397,8 +396,7 @@ enum ixgbe_ring_f_enum {
#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
-#define IXGBE_MAX_MACVLANS 31
-#define IXGBE_MAX_DCBMACVLANS 8
+#define IXGBE_MAX_MACVLANS 63
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
@@ -723,8 +721,7 @@ struct ixgbe_adapter {
u16 bridge_mode;
- u16 eeprom_verh;
- u16 eeprom_verl;
+ char eeprom_id[NVM_VER_SIZE];
u16 eeprom_cap;
u32 interrupt_event;
@@ -768,7 +765,8 @@ struct ixgbe_adapter {
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
- unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
+ /* Bitmask indicating in use pools */
+ DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
#define IXGBE_MAX_LINK_HANDLE 10
struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bef255f6a18..1948e4208fb4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -4028,6 +4028,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbe_get_orom_version - Return option ROM version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid option ROM version is found, nvm_ver->or_valid is set to true;
+ * otherwise nvm_ver->or_valid is false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+ nvm_ver->or_valid = false;
+ /* The option ROM may or may not be present. Start with the pointer. */
+ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+ /* make sure offset is valid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+ /* return if the option ROM block is absent or invalid */
+ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+ eeprom_cfg_blkl == NVM_VER_INVALID ||
+ eeprom_cfg_blkh == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->or_valid = true;
+ nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+ nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+ (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+ nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
+/**
+ * ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid OEM product version is found, nvm_ver->oem_valid is set to true;
+ * otherwise nvm_ver->oem_valid is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 rel_num, prod_ver, mod_len, cap, offset;
+
+ nvm_ver->oem_valid = false;
+ hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+ /* Return if offset to OEM Product Version block is invalid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ /* Read product version block */
+ hw->eeprom.ops.read(hw, offset, &mod_len);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+ /* Return if OEM product version block is invalid */
+ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+ (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+ /* Return if version is invalid */
+ if ((rel_num | prod_ver) == 0x0 ||
+ rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+ nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+ nvm_ver->oem_release = rel_num;
+ nvm_ver->oem_valid = true;
+}
+
+/**
+ * ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * Word read errors will return 0xFFFF.
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 etk_id_l, etk_id_h;
+
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+ etk_id_l = NVM_VER_INVALID;
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+ etk_id_h = NVM_VER_INVALID;
+
+ /* The word order for the version format is determined by high order
+ * word bit 15.
+ */
+ if ((etk_id_h & NVM_ETK_VALID) == 0) {
+ nvm_ver->etk_id = etk_id_h;
+ nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+ } else {
+ nvm_ver->etk_id = etk_id_l;
+ nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+ }
+}
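
The word-order selection above is easy to misread, so here is a small
standalone model of it (plain C with local copies of the two constants):
bit 15 of the high EEPROM word decides which word supplies the upper half
of the ETrack ID.

#include <stdint.h>
#include <stdio.h>

#define ETK_VALID	0x8000u		/* mirrors NVM_ETK_VALID */
#define ETK_SHIFT	16		/* mirrors NVM_ETK_SHIFT */

/* Assemble the 32-bit ETrack ID from the two EEPROM words. */
static uint32_t etk_id(uint16_t lo, uint16_t hi)
{
	if (!(hi & ETK_VALID))
		return (uint32_t)hi | ((uint32_t)lo << ETK_SHIFT);
	return (uint32_t)lo | ((uint32_t)hi << ETK_SHIFT);
}

int main(void)
{
	printf("0x%08x\n", etk_id(0x1234, 0x8001));	/* normal order  */
	printf("0x%08x\n", etk_id(0x1234, 0x0001));	/* swapped order */
	return 0;
}
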
+
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
u32 rxctrl;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index a01409e2e06c..4d4c02366cb3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0aaf70b3cfcd..3bcf58b27d8b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1014,16 +1014,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 nvm_track_id;
strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ixgbe_driver_version,
sizeof(drvinfo->version));
- nvm_track_id = (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl;
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- nvm_track_id);
+ strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index a23c2b5411a0..6e6b3c175267 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1034,11 +1034,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
ixgbe_driver_version);
/* Firmware Version */
- snprintf(info->firmware_version,
- sizeof(info->firmware_version),
- "0x%08x",
- (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl);
+ strlcpy(info->firmware_version, adapter->eeprom_id,
+ sizeof(info->firmware_version));
/* Model */
if (hw->mac.type == ixgbe_mac_82599EB) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8e2a957aca18..cceafbc3f1db 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -350,6 +350,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit VMDq instances on the PF by number of Tx queues */
+ vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -512,12 +515,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
- bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
/* only proceed if SR-IOV is enabled */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit l2fwd RSS based on total Tx queue limit */
+ rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -525,7 +530,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
+ if (vmdq_i > 32) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
@@ -701,7 +706,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_xdp_queues = 0;
- adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_pools = 1;
adapter->num_rx_queues_per_pool = 1;
#ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 95aba975b391..e47e0c470508 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -192,6 +192,13 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static const struct net_device_ops ixgbe_netdev_ops;
+
+static bool netif_is_ixgbe(struct net_device *dev)
+{
+ return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
+}
+
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
@@ -1064,24 +1071,12 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
- u32 head, tail;
-
- if (ring->l2_accel_priv)
- adapter = ring->l2_accel_priv->real_adapter;
- else
- adapter = netdev_priv(ring->netdev);
-
- hw = &adapter->hw;
- head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
- tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+ unsigned int head, tail;
- if (head != tail)
- return (head < tail) ?
- tail - head : (tail + ring->count - head);
+ head = ring->next_to_clean;
+ tail = ring->next_to_use;
- return 0;
+ return ((head <= tail) ? tail : tail + ring->count) - head;
}
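
The rewritten helper no longer reads the TDH/TDT registers (the lookup that
needed the now-removed l2_accel_priv back-pointer); it derives the pending
count from the software ring indices alone. A standalone model of the
wrap-aware arithmetic:

#include <stdio.h>

/* Pending entries between next_to_clean (head) and next_to_use (tail)
 * on a ring of `count` descriptors, handling wrap-around.
 */
static unsigned int ring_pending(unsigned int head, unsigned int tail,
				 unsigned int count)
{
	return ((head <= tail) ? tail : tail + count) - head;
}

int main(void)
{
	printf("%u\n", ring_pending(10, 14, 512));	/* 4, no wrap  */
	printf("%u\n", ring_pending(500, 3, 512));	/* 15, wrapped */
	return 0;
}
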
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
@@ -2517,13 +2512,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
-enum latency_range {
- lowest_latency = 0,
- low_latency = 1,
- bulk_latency = 2,
- latency_invalid = 255
-};
-
/**
* ixgbe_update_itr - update the dynamic ITR value based on statistics
* @q_vector: structure containing interrupt and ring information
@@ -2536,8 +2524,6 @@ enum latency_range {
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see ixgbe_param.c)
**/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
@@ -3855,16 +3841,20 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
u32 vfreta = 0;
- unsigned int pf_pool = adapter->num_vfs;
/* Write redirection table to HW */
for (i = 0; i < reta_entries; i++) {
+ u16 pool = adapter->num_rx_pools;
+
vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
- if ((i & 3) == 3) {
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+ if ((i & 3) != 3)
+ continue;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
vfreta);
- vfreta = 0;
- }
+ vfreta = 0;
}
}
@@ -3901,13 +3891,17 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- unsigned int pf_pool = adapter->num_vfs;
int i, j;
/* Fill out hash function seeds */
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
- *(adapter->rss_key + i));
+ for (i = 0; i < 10; i++) {
+ u16 pool = adapter->num_rx_pools;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
+ *(adapter->rss_key + i));
+ }
/* Fill out the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
@@ -3973,7 +3967,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
- unsigned int pf_pool = adapter->num_vfs;
+ u16 pool = adapter->num_rx_pools;
/* Enable VF RSS mode */
mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
@@ -3983,7 +3977,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
ixgbe_setup_vfreta(adapter);
vfmrqc = IXGBE_MRQC_RSSEN;
vfmrqc |= rss_field;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFMRQC(VMDQ_P(pool)),
+ vfmrqc);
} else {
ixgbe_setup_reta(adapter);
mrqc |= rss_field;
@@ -4146,7 +4144,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int rss_i = adapter->ring_feature[RING_F_RSS].indices;
- u16 pool;
+ u16 pool = adapter->num_rx_pools;
/* PSRTYPE must be initialized in non 82598 adapters */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -4163,7 +4161,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
else if (rss_i > 1)
psrtype |= 1u << 29;
- for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+ while (pool--)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
@@ -4490,8 +4488,9 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl &= ~IXGBE_RXDCTL_VME;
@@ -4527,8 +4526,9 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
@@ -5279,29 +5279,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
- int rss_i = adapter->num_rx_queues_per_pool;
- struct ixgbe_hw *hw = &adapter->hw;
- u16 pool = vadapter->pool;
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_L2HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- if (rss_i > 3)
- psrtype |= 2u << 29;
- else if (rss_i > 1)
- psrtype |= 1u << 29;
-
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
-}
-
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -5365,7 +5342,6 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
usleep_range(10000, 20000);
ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
ixgbe_clean_rx_ring(rx_ring);
- rx_ring->l2_accel_priv = NULL;
}
static int ixgbe_fwd_ring_down(struct net_device *vdev,
@@ -5383,10 +5359,8 @@ static int ixgbe_fwd_ring_down(struct net_device *vdev,
adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
}
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
- }
return 0;
@@ -5399,14 +5373,13 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
unsigned int rxbase, txbase, queues;
int i, baseq, err = 0;
- if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+ if (!test_bit(accel->pool, adapter->fwd_bitmask))
return 0;
baseq = accel->pool * adapter->num_rx_queues_per_pool;
- netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
- baseq, baseq + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
+ baseq, baseq + adapter->num_rx_queues_per_pool);
accel->netdev = vdev;
accel->rx_base_queue = rxbase = baseq;
@@ -5417,14 +5390,11 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
adapter->rx_ring[rxbase + i]->netdev = vdev;
- adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
}
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->tx_ring[txbase + i]->netdev = vdev;
- adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
- }
queues = min_t(unsigned int,
adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
@@ -5437,10 +5407,10 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
goto fwd_queue_err;
if (is_valid_ether_addr(vdev->dev_addr))
- ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+ ixgbe_add_mac_filter(adapter, vdev->dev_addr,
+ VMDQ_P(accel->pool));
- ixgbe_fwd_psrtype(accel);
- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ ixgbe_macvlan_set_rx_mode(vdev, VMDQ_P(accel->pool), adapter);
return err;
fwd_queue_err:
ixgbe_fwd_ring_down(vdev, accel);
@@ -6304,7 +6274,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
}
/* PF holds first pool slot */
- set_bit(0, &adapter->fwd_bitmask);
+ set_bit(0, adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -6791,7 +6761,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 ctrl, fctrl;
+ u32 ctrl;
u32 wufc = adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
@@ -6816,18 +6786,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
hw->mac.ops.stop_link_on_d3(hw);
if (wufc) {
+ u32 fctrl;
+
ixgbe_set_rx_mode(netdev);
/* enable the optics for 82599 SFP+ fiber as we can WoL */
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
- /* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & IXGBE_WUFC_MC) {
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_MPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- }
+ /* enable the reception of multicast packets */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
ctrl |= IXGBE_CTRL_GIO_DIS;
@@ -7663,6 +7633,7 @@ sfp_out:
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 cap_speed;
u32 speed;
bool autoneg = false;
@@ -7675,16 +7646,14 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
- speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
- hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+ hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
- /* setup the highest link when no autoneg */
- if (!autoneg) {
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- speed = IXGBE_LINK_SPEED_10GB_FULL;
- }
- }
+ /* advertise highest capable link speed */
+ if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
+ speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL);
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, speed, true);
@@ -8877,7 +8846,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- bool pools;
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
@@ -8886,10 +8854,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
return -EINVAL;
- pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
- if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
- return -EBUSY;
-
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
@@ -9052,6 +9016,7 @@ static int get_macvlan_queue(struct net_device *upper, void *_data)
static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
u8 *queue, u64 *action)
{
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int num_vfs = adapter->num_vfs, vf;
struct upper_walk_data data;
struct net_device *upper;
@@ -9060,11 +9025,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
for (vf = 0; vf < num_vfs; ++vf) {
upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
if (upper->ifindex == ifindex) {
- if (adapter->num_rx_pools > 1)
- *queue = vf * 2;
- else
- *queue = vf * adapter->num_rx_queues_per_pool;
-
+ *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
*action = vf + 1;
*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
return 0;
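
__ALIGN_MASK(1, ~vmdq->mask) is a compact way to recover the queues-per-pool
stride from the VMDq ring mask. The standalone check below reproduces the
identity; the sample mask values (0x78/0x7C/0x7E for 8/4/2 queues per pool)
are quoted from memory of ixgbe.h and should be treated as illustrative:

#include <stdio.h>

/* The kernel's __ALIGN_MASK, copied so the sketch is self-contained. */
#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned int masks[] = { 0x78, 0x7C, 0x7E };
	int i;

	for (i = 0; i < 3; i++)
		printf("mask 0x%02x -> %u queues per pool\n", masks[i],
		       __ALIGN_MASK(1u, ~masks[i]));
	return 0;
}
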
@@ -9831,6 +9792,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ int tcs = netdev_get_num_tc(pdev) ? : 1;
unsigned int limit;
int pool, err;
@@ -9858,7 +9820,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
}
if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+ adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
(adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
return ERR_PTR(-EBUSY);
@@ -9866,10 +9828,9 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!fwd_adapter)
return ERR_PTR(-ENOMEM);
- pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
- adapter->num_rx_pools++;
- set_bit(pool, &adapter->fwd_bitmask);
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ set_bit(pool, adapter->fwd_bitmask);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
@@ -9895,8 +9856,7 @@ fwd_add_err:
/* unwind counter and free adapter struct */
netdev_info(pdev,
"%s: dfwd hardware acceleration failed\n", vdev->name);
- clear_bit(pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ clear_bit(pool, adapter->fwd_bitmask);
kfree(fwd_adapter);
return ERR_PTR(err);
}
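
Replacing the scalar unsigned long fwd_bitmask with DECLARE_BITMAP() is what
lifts the macvlan pool ceiling from 32 to 64: the bitmap helpers now take the
array itself rather than &adapter->fwd_bitmask. A hedged sketch of the pool
allocation pattern, with hypothetical demo_* names:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_MAX_POOLS 64	/* plays the role of IXGBE_MAX_MACVLANS + 1 */

struct demo_adapter {
	DECLARE_BITMAP(pool_bitmask, DEMO_MAX_POOLS);
	int num_pools;
};

/* Claim the first free pool slot, or return -1 if all are in use. */
static int demo_claim_pool(struct demo_adapter *ad)
{
	int pool = find_first_zero_bit(ad->pool_bitmask, DEMO_MAX_POOLS);

	if (pool >= DEMO_MAX_POOLS)
		return -1;
	set_bit(pool, ad->pool_bitmask);
	ad->num_pools++;
	return pool;
}
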
@@ -9907,10 +9867,9 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
unsigned int limit;
- clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ clear_bit(fwd_adapter->pool, adapter->fwd_bitmask);
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
@@ -9925,11 +9884,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
}
ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
- netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
fwd_adapter->pool, adapter->num_rx_pools,
fwd_adapter->rx_base_queue,
- fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
+ fwd_adapter->rx_base_queue +
+ adapter->num_rx_queues_per_pool);
kfree(fwd_adapter);
}
@@ -10243,6 +10202,41 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version - Set FW version
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version to
+ * format and display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ ixgbe_get_orom_version(hw, &nvm_ver);
+
+ if (nvm_ver.or_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
+ nvm_ver.or_build, nvm_ver.or_patch);
+ return;
+ }
+
+ /* Set ETrack ID format */
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x", nvm_ver.etk_id);
+}
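
The fallback order above -- OEM product version first, then ETrack ID plus
option ROM version, then the bare ETrack ID -- can be modelled in isolation.
The sketch below mirrors the three snprintf() formats; the parameters and the
local VER_SIZE stand-in are illustrative, not driver definitions:

#include <stdint.h>
#include <stdio.h>

#define VER_SIZE 32	/* plays the role of NVM_VER_SIZE */

static void format_fw_version(char *buf, int oem_valid, int or_valid,
			      uint32_t etk, unsigned int a, unsigned int b,
			      unsigned int c)
{
	if (oem_valid)
		snprintf(buf, VER_SIZE, "%x.%x.%x", a, b, c);
	else if (or_valid)
		snprintf(buf, VER_SIZE, "0x%08x, %u.%u.%u", etk, a, b, c);
	else
		snprintf(buf, VER_SIZE, "0x%08x", etk);
}

int main(void)
{
	char buf[VER_SIZE];

	format_fw_version(buf, 0, 1, 0x80000e1f, 1, 2, 3);
	puts(buf);	/* "0x80000e1f, 1.2.3" */
	return 0;
}
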
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -10578,8 +10572,7 @@ skip_sriov:
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* save off EEPROM version number */
- hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
- hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
+ ixgbe_set_fw_version(adapter);
/* pick up the PCI bus settings for reporting later */
if (ixgbe_pcie_from_parent(hw))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 112d24c6c9ce..0085f4632966 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -227,9 +227,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 gpie;
- u32 vmdctl;
int rss;
/* set num VFs to 0 to prevent access to vfinfo */
@@ -271,18 +268,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
pci_disable_sriov(adapter->pdev);
#endif
- /* turn off device IOV mode */
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* set default pool back to 0 */
- vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
- IXGBE_WRITE_FLUSH(hw);
-
/* Disable VMDq flag so device will be set in VM mode */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
@@ -305,10 +290,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
- int err = 0;
- u8 num_tc;
- int i;
int pre_existing_vfs = pci_num_vf(dev);
+ int err = 0, num_rx_pools, i, limit;
+ u8 num_tc;
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
err = ixgbe_disable_sriov(adapter);
@@ -331,22 +315,14 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* other values out of range.
*/
num_tc = netdev_get_num_tc(adapter->netdev);
+ num_rx_pools = adapter->num_rx_pools;
+ limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
+ (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
- if (num_tc > 4) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
- return -EPERM;
- }
- } else if ((num_tc > 1) && (num_tc <= 4)) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
- return -EPERM;
- }
- } else {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
- return -EPERM;
- }
+ if (num_vfs > (limit - num_rx_pools)) {
+ e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
+ num_tc, num_rx_pools - 1, limit - num_rx_pools);
+ return -EPERM;
}
err = __ixgbe_enable_sriov(adapter, num_vfs);
@@ -378,13 +354,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
int err;
#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+ int prev_num_vf = pci_num_vf(dev);
#endif
err = ixgbe_disable_sriov(adapter);
/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
- if (!err && current_flags != adapter->flags)
+ if (!err && (current_flags != adapter->flags ||
+ prev_num_vf != pci_num_vf(dev)))
ixgbe_sriov_reinit(adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ffa0ee5cd0f5..21eb79ae3c30 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data {
struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
};
+#define NVM_OROM_OFFSET 0x17
+#define NVM_OROM_BLK_LOW 0x83
+#define NVM_OROM_BLK_HI 0x84
+#define NVM_OROM_PATCH_MASK 0xFF
+#define NVM_OROM_SHIFT 8
+
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETK_VALID 0x8000
+#define NVM_INVALID_PTR 0xFFFF
+#define NVM_VER_SIZE 32 /* version string size */
+
+struct ixgbe_nvm_version {
+ u32 etk_id;
+ u8 nvm_major;
+ u16 nvm_minor;
+ u8 nvm_id;
+
+ bool oem_valid;
+ u8 oem_major;
+ u8 oem_minor;
+ u16 oem_release;
+
+ bool or_valid;
+ u8 or_major;
+ u16 or_build;
+ u8 or_patch;
+};
+
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
#define IXGBE_EICS 0x00808
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f4a69134ade..573f743b556a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1896,10 +1896,6 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
unsigned int flags = netdev->flags;
int xcast_mode;
- xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
- (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
- IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
-
/* request the most inclusive mode we need */
if (flags & IFF_PROMISC)
xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bf1f04164885..ebc1f566a4d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
if (param->rx_jumbo_pending || param->rx_mini_pending)
return -EINVAL;
+ if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
+ en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ MLX4_EN_MIN_RX_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
+ en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
+ __func__, param->tx_pending,
+ MLX4_EN_MIN_TX_SIZE);
+ return -EINVAL;
+ }
+
rx_size = roundup_pow_of_two(param->rx_pending);
- rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
- rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
tx_size = roundup_pow_of_two(param->tx_pending);
- tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
- tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
priv->rx_ring[0]->size) &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 19b21b40ab07..c805769d92a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
+ en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5299310f2481..d629da213511 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -47,6 +47,7 @@
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/xdp.h>
+#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -227,12 +228,6 @@ enum mlx5e_priv_flag {
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
-struct mlx5e_cq_moder {
- u16 usec;
- u16 pkts;
- u8 cq_period_mode;
-};
-
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -243,8 +238,8 @@ struct mlx5e_params {
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
- struct mlx5e_cq_moder rx_cq_moderation;
- struct mlx5e_cq_moder tx_cq_moderation;
+ struct net_dim_cq_moder rx_cq_moderation;
+ struct net_dim_cq_moder tx_cq_moderation;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
@@ -254,7 +249,7 @@ struct mlx5e_params {
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
bool scatter_fcs_en;
- bool rx_am_enabled;
+ bool rx_dim_enabled;
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
@@ -473,32 +468,6 @@ struct mlx5e_mpw_info {
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};
-struct mlx5e_rx_am_stats {
- int ppms; /* packets per msec */
- int bpms; /* bytes per msec */
- int epms; /* events per msec */
-};
-
-struct mlx5e_rx_am_sample {
- ktime_t time;
- u32 pkt_ctr;
- u32 byte_ctr;
- u16 event_ctr;
-};
-
-struct mlx5e_rx_am { /* Adaptive Moderation */
- u8 state;
- struct mlx5e_rx_am_stats prev_stats;
- struct mlx5e_rx_am_sample start_sample;
- struct work_struct work;
- u8 profile_ix;
- u8 mode;
- u8 tune_state;
- u8 steps_right;
- u8 steps_left;
- u8 tired;
-};
-
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
@@ -559,7 +528,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
- struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct net_dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog *xdp_prog;
@@ -659,6 +628,7 @@ struct mlx5e_tc_table {
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
};
struct mlx5e_vlan_table {
@@ -864,10 +834,6 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-void mlx5e_rx_am(struct mlx5e_rq *rq);
-void mlx5e_rx_am_work(struct work_struct *work);
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-
void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
@@ -1114,4 +1080,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
+void mlx5e_rx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
new file mode 100644
index 000000000000..602851ab5b14
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/net_dim.h>
+#include "en.h"
+
+void mlx5e_rx_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
+ cur_profile.usec, cur_profile.pkts);
+
+ dim->state = NET_DIM_START_MEASURE;
+}
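The new handler above recovers its ring with two container_of() hops: from the work item embedded in struct net_dim, and from there to the struct mlx5e_rq that embeds the net_dim state. A standalone sketch of that layout trick, with illustrative struct names rather than the mlx5 ones:

/* Userspace sketch of the double container_of() hop. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item { int pending; };
struct dim_state { struct work_item work; int profile_ix; };
struct rx_queue  { int ix; struct dim_state dim; };

static void dim_work_handler(struct work_item *w)
{
	struct dim_state *dim = container_of(w, struct dim_state, work);
	struct rx_queue *rq = container_of(dim, struct rx_queue, dim);

	printf("rq %d applies profile %d\n", rq->ix, dim->profile_ix);
}

int main(void)
{
	struct rx_queue rq = { .ix = 3, .dim = { .profile_ix = 2 } };

	dim_work_handler(&rq.dim.work);	/* prints: rq 3 applies profile 2 */
	return 0;
}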
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8f05efa5c829..bd5af7f37198 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -296,7 +296,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
u32 min_rq_size;
- u32 max_rq_size;
u8 log_rq_size;
u8 log_sq_size;
u32 num_mtts;
@@ -315,8 +314,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_min_log_rq_size(rq_wq_type));
- max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
param->rx_pending);
@@ -326,12 +323,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size);
return -EINVAL;
}
- if (param->rx_pending > max_rq_size) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
- __func__, param->rx_pending,
- max_rq_size);
- return -EINVAL;
- }
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
@@ -347,12 +338,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
- if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
- netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
- __func__, param->tx_pending,
- 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
- return -EINVAL;
- }
log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
@@ -480,7 +465,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
return 0;
}
@@ -534,7 +519,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+ new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -542,7 +527,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+ reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
if (!reset) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 539bd1d24396..bbbdb5c0086b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -582,7 +582,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- if (xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix) < 0)
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+ if (err < 0)
goto err_rq_wq_destroy;
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
@@ -677,8 +678,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wqe->data.lkey = rq->mkey_be;
}
- INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = params->rx_cq_moderation.cq_period_mode;
+ INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
+
+ switch (params->rx_cq_moderation.cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -925,7 +935,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
- if (params->rx_am_enabled)
+ if (params->rx_dim_enabled)
c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
return 0;
@@ -958,7 +968,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->am.work);
+ cancel_work_sync(&rq->dim.work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1571,7 +1581,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
- struct mlx5e_cq_moder moder,
+ struct net_dim_cq_moder moder,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -1753,7 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
- struct mlx5e_cq_moder icocq_moder = {0, 0};
+ struct net_dim_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
@@ -2005,7 +2015,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -4047,9 +4057,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
params->rx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
- if (params->rx_am_enabled)
- params->rx_cq_moderation =
- mlx5e_am_get_def_profile(cq_period_mode);
+ if (params->rx_dim_enabled) {
+ switch (cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ }
+ }
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
@@ -4111,7 +4130,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index c6a77f8e99a4..4d1b0ff4b6e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -884,7 +884,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7b38480811d4..ff234dfefc27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -495,8 +495,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
mlx5_cqwq_pop(&cq->wq);
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
- WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
- cqe->op_own);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
return;
}
@@ -506,9 +506,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
}
if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
- WARN_ONCE(true,
- "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- icowi->opcode);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
}
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
@@ -1176,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
+ struct hwtstamp_config *tstamp;
struct net_device *netdev;
+ struct mlx5e_priv *priv;
char *pseudo_header;
u32 qpn;
u8 *dgid;
@@ -1195,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
return;
}
+ priv = mlx5i_epriv(netdev);
+ tstamp = &priv->tstamp;
+
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1215,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
deleted file mode 100644
index e401d9d245f3..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* Adaptive moderation profiles */
-#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
-#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
-#define MLX5E_PARAMS_AM_NUM_PROFILES 5
-
-/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
-#define MLX5_AM_EQE_PROFILES { \
- {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define MLX5_AM_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
-}
-
-static const struct mlx5e_cq_moder
-profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
- MLX5_AM_EQE_PROFILES,
- MLX5_AM_CQE_PROFILES,
-};
-
-static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
-{
- struct mlx5e_cq_moder cq_moder;
-
- cq_moder = profile[cq_period_mode][ix];
- cq_moder.cq_period_mode = cq_period_mode;
- return cq_moder;
-}
-
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
-{
- int default_profile_ix;
-
- if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
- else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
-
- return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
-}
-
-/* Adaptive moderation logic */
-enum {
- MLX5E_AM_START_MEASURE,
- MLX5E_AM_MEASURE_IN_PROGRESS,
- MLX5E_AM_APPLY_NEW_PROFILE,
-};
-
-enum {
- MLX5E_AM_PARKING_ON_TOP,
- MLX5E_AM_PARKING_TIRED,
- MLX5E_AM_GOING_RIGHT,
- MLX5E_AM_GOING_LEFT,
-};
-
-enum {
- MLX5E_AM_STATS_WORSE,
- MLX5E_AM_STATS_SAME,
- MLX5E_AM_STATS_BETTER,
-};
-
-enum {
- MLX5E_AM_STEPPED,
- MLX5E_AM_TOO_TIRED,
- MLX5E_AM_ON_EDGE,
-};
-
-static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- return true;
- case MLX5E_AM_GOING_RIGHT:
- return (am->steps_left > 1) && (am->steps_right == 1);
- default: /* MLX5E_AM_GOING_LEFT */
- return (am->steps_right > 1) && (am->steps_left == 1);
- }
-}
-
-static void mlx5e_am_turn(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- am->tune_state = MLX5E_AM_GOING_LEFT;
- am->steps_left = 0;
- break;
- case MLX5E_AM_GOING_LEFT:
- am->tune_state = MLX5E_AM_GOING_RIGHT;
- am->steps_right = 0;
- break;
- }
-}
-
-static int mlx5e_am_step(struct mlx5e_rx_am *am)
-{
- if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
- return MLX5E_AM_TOO_TIRED;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
- return MLX5E_AM_ON_EDGE;
- am->profile_ix++;
- am->steps_right++;
- break;
- case MLX5E_AM_GOING_LEFT:
- if (am->profile_ix == 0)
- return MLX5E_AM_ON_EDGE;
- am->profile_ix--;
- am->steps_left++;
- break;
- }
-
- am->tired++;
- return MLX5E_AM_STEPPED;
-}
-
-static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tired = 0;
- am->tune_state = MLX5E_AM_PARKING_ON_TOP;
-}
-
-static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tune_state = MLX5E_AM_PARKING_TIRED;
-}
-
-static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
-{
- am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
- MLX5E_AM_GOING_RIGHT;
- mlx5e_am_step(am);
-}
-
-#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
-
-static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
- struct mlx5e_rx_am_stats *prev)
-{
- if (!prev->bpms)
- return curr->bpms ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
- return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
- return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
- return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- return MLX5E_AM_STATS_SAME;
-}
-
-static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
- struct mlx5e_rx_am *am)
-{
- int prev_state = am->tune_state;
- int prev_ix = am->profile_ix;
- int stats_res;
- int step_res;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_SAME)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_PARKING_TIRED:
- am->tired--;
- if (!am->tired)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_GOING_RIGHT:
- case MLX5E_AM_GOING_LEFT:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_BETTER)
- mlx5e_am_turn(am);
-
- if (mlx5e_am_on_top(am)) {
- mlx5e_am_park_on_top(am);
- break;
- }
-
- step_res = mlx5e_am_step(am);
- switch (step_res) {
- case MLX5E_AM_ON_EDGE:
- mlx5e_am_park_on_top(am);
- break;
- case MLX5E_AM_TOO_TIRED:
- mlx5e_am_park_tired(am);
- break;
- }
-
- break;
- }
-
- if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
- (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
- am->prev_stats = *curr_stats;
-
- return am->profile_ix != prev_ix;
-}
-
-static void mlx5e_am_sample(struct mlx5e_rq *rq,
- struct mlx5e_rx_am_sample *s)
-{
- s->time = ktime_get();
- s->pkt_ctr = rq->stats.packets;
- s->byte_ctr = rq->stats.bytes;
- s->event_ctr = rq->cq.event_ctr;
-}
-
-#define MLX5E_AM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
-
-static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
- struct mlx5e_rx_am_sample *end,
- struct mlx5e_rx_am_stats *curr_stats)
-{
- /* u32 holds up to 71 minutes, should be enough */
- u32 delta_us = ktime_us_delta(end->time, start->time);
- u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
- u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
- start->byte_ctr);
-
- if (!delta_us)
- return;
-
- curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
- curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
- curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
- delta_us);
-}
-
-void mlx5e_rx_am_work(struct work_struct *work)
-{
- struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
- work);
- struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
- struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
-
- mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
- cur_profile.usec, cur_profile.pkts);
-
- am->state = MLX5E_AM_START_MEASURE;
-}
-
-void mlx5e_rx_am(struct mlx5e_rq *rq)
-{
- struct mlx5e_rx_am *am = &rq->am;
- struct mlx5e_rx_am_sample end_sample;
- struct mlx5e_rx_am_stats curr_stats;
- u16 nevents;
-
- switch (am->state) {
- case MLX5E_AM_MEASURE_IN_PROGRESS:
- nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
- am->start_sample.event_ctr);
- if (nevents < MLX5E_AM_NEVENTS)
- break;
- mlx5e_am_sample(rq, &end_sample);
- mlx5e_am_calc_stats(&am->start_sample, &end_sample,
- &curr_stats);
- if (mlx5e_am_decision(&curr_stats, am)) {
- am->state = MLX5E_AM_APPLY_NEW_PROFILE;
- schedule_work(&am->work);
- break;
- }
- /* fall through */
- case MLX5E_AM_START_MEASURE:
- mlx5e_am_sample(rq, &am->start_sample);
- am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
- break;
- case MLX5E_AM_APPLY_NEW_PROFILE:
- break;
- }
-}
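The adaptive-moderation state machine deleted above is not dropped from the kernel: this series moves it, largely verbatim, into the generic net_dim library under include/linux/net_dim.h, with the MLX5E_AM_* states, profiles, and helpers renamed to NET_DIM_* so that other drivers can share the logic. en_dim.c and en_txrx.c above already consume that library through net_dim_get_profile(), net_dim_sample(), and net_dim().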
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 933275fe03b2..cf528da51243 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -56,12 +56,14 @@ struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
u32 mod_hdr_id;
+ u32 hairpin_tirn;
};
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
MLX5E_TC_FLOW_NIC = BIT(1),
MLX5E_TC_FLOW_OFFLOADED = BIT(2),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(3),
};
struct mlx5e_tc_flow {
@@ -71,6 +73,7 @@ struct mlx5e_tc_flow {
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+ struct list_head hairpin; /* flows sharing the same hairpin */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -93,6 +96,25 @@ enum {
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
+struct mlx5e_hairpin {
+ struct mlx5_hairpin *pair;
+
+ struct mlx5_core_dev *func_mdev;
+ u32 tdn;
+ u32 tirn;
+};
+
+struct mlx5e_hairpin_entry {
+ /* a node of a hash table which keeps all the hairpin entries */
+ struct hlist_node hairpin_hlist;
+
+ /* flows sharing the same hairpin */
+ struct list_head flows;
+
+ int peer_ifindex;
+ struct mlx5e_hairpin *hp;
+};
+
struct mod_hdr_key {
int num_actions;
void *actions;
@@ -222,6 +244,187 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
}
}
+static
+struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+
+ netdev = __dev_get_by_index(net, ifindex);
+ priv = netdev_priv(netdev);
+ return priv->mdev;
+}
+
+static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ void *tirc;
+ int err;
+
+ err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
+ if (err)
+ goto alloc_tdn_err;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn);
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+ if (err)
+ goto create_tir_err;
+
+ return 0;
+
+create_tir_err:
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+alloc_tdn_err:
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
+{
+ mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+}
+
+static struct mlx5e_hairpin *
+mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
+ int peer_ifindex)
+{
+ struct mlx5_core_dev *func_mdev, *peer_mdev;
+ struct mlx5e_hairpin *hp;
+ struct mlx5_hairpin *pair;
+ int err;
+
+ hp = kzalloc(sizeof(*hp), GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ func_mdev = priv->mdev;
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+
+ pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
+ if (IS_ERR(pair)) {
+ err = PTR_ERR(pair);
+ goto create_pair_err;
+ }
+ hp->pair = pair;
+ hp->func_mdev = func_mdev;
+
+ err = mlx5e_hairpin_create_transport(hp);
+ if (err)
+ goto create_transport_err;
+
+ return hp;
+
+create_transport_err:
+ mlx5_core_hairpin_destroy(hp->pair);
+create_pair_err:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
+{
+ mlx5e_hairpin_destroy_transport(hp);
+ mlx5_core_hairpin_destroy(hp->pair);
+ kvfree(hp);
+}
+
+static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
+ int peer_ifindex)
+{
+ struct mlx5e_hairpin_entry *hpe;
+
+ hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hairpin_hlist, peer_ifindex) {
+ if (hpe->peer_ifindex == peer_ifindex)
+ return hpe;
+ }
+
+ return NULL;
+}
+
+static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ int peer_ifindex = parse_attr->mirred_ifindex;
+ struct mlx5_hairpin_params params;
+ struct mlx5e_hairpin_entry *hpe;
+ struct mlx5e_hairpin *hp;
+ int err;
+
+ if (!MLX5_CAP_GEN(priv->mdev, hairpin)) {
+ netdev_warn(priv->netdev, "hairpin is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ hpe = mlx5e_hairpin_get(priv, peer_ifindex);
+ if (hpe)
+ goto attach_flow;
+
+ hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
+ if (!hpe)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hpe->flows);
+ hpe->peer_ifindex = peer_ifindex;
+
+ params.log_data_size = 15;
+ params.log_data_size = min_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
+ params.log_data_size = max_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
+ params.q_counter = priv->q_counter;
+
+ hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ if (IS_ERR(hp)) {
+ err = PTR_ERR(hp);
+ goto create_hairpin_err;
+ }
+
+ netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x log data size %d\n",
+ hp->tirn, hp->pair->rqn, hp->pair->peer_mdev->priv.name,
+ hp->pair->sqn, params.log_data_size);
+
+ hpe->hp = hp;
+ hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, peer_ifindex);
+
+attach_flow:
+ flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ list_add(&flow->hairpin, &hpe->flows);
+ return 0;
+
+create_hairpin_err:
+ kfree(hpe);
+ return err;
+}
+
+static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct list_head *next = flow->hairpin.next;
+
+ list_del(&flow->hairpin);
+
+ /* no more hairpin flows for us, release the hairpin pair */
+ if (list_empty(next)) {
+ struct mlx5e_hairpin_entry *hpe;
+
+ hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
+
+ netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ hpe->hp->pair->peer_mdev->priv.name);
+
+ mlx5e_hairpin_destroy(hpe->hp);
+ hash_del(&hpe->hairpin_hlist);
+ kfree(hpe);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -229,7 +432,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flow_tag = attr->flow_tag,
@@ -238,18 +441,33 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
- int err;
+ int err, dest_ix = 0;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.vlan.ft.t;
- } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter))
- return ERR_CAST(counter);
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_add_hairpin_flow;
+ }
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[dest_ix].tir_num = attr->hairpin_tirn;
+ } else {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ }
+ dest_ix++;
+ }
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter = counter;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(counter)) {
+ rule = ERR_CAST(counter);
+ goto err_fc_create;
+ }
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dest_ix].counter = counter;
+ dest_ix++;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -292,7 +510,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, &dest, 1);
+ &flow_act, dest, dest_ix);
if (IS_ERR(rule))
goto err_add_rule;
@@ -309,7 +527,10 @@ err_create_ft:
mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
-
+err_fc_create:
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
+err_add_hairpin_flow:
return rule;
}
@@ -330,6 +551,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -1422,6 +1646,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return true;
}
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *fmdev, *pmdev;
+ u16 func_id, peer_id;
+
+ fmdev = priv->mdev;
+ pmdev = peer_priv->mdev;
+
+ func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
+ peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+
+ return (func_id == peer_id);
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -1466,6 +1704,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EOPNOTSUPP;
}
+ if (is_tcf_mirred_egress_redirect(a)) {
+ struct net_device *peer_dev = tcf_mirred_dev(a);
+
+ if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
+ same_hw_devs(priv, netdev_priv(peer_dev))) {
+ parse_attr->mirred_ifindex = peer_dev->ifindex;
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
+ peer_dev->name);
+ return -EINVAL;
+ }
+ continue;
+ }
+
if (is_tcf_skbedit_mark(a)) {
u32 mark = tcf_skbedit_mark(a);
@@ -2188,6 +2443,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
+ hash_init(tc->hairpin_tbl);
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
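In the en_tc.c changes above, flows that hairpin to the same peer device share one hardware pair: mlx5e_hairpin_flow_add() looks up the peer ifindex in hairpin_tbl, creates the pair only on a miss, and links the flow into the entry's flow list; mlx5e_hairpin_flow_del() destroys the pair once that list drains. A userspace sketch of this get-or-create lifetime scheme, with stand-in types and a counter in place of the flow list:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int peer_ifindex;
	int nflows;		/* stands in for the shared flow list */
	struct entry *next;	/* stands in for the hash bucket */
};

static struct entry *entries;

static struct entry *entry_get(int peer_ifindex)
{
	struct entry *e;

	for (e = entries; e; e = e->next)
		if (e->peer_ifindex == peer_ifindex)
			return e;
	return NULL;
}

static struct entry *flow_attach(int peer_ifindex)
{
	struct entry *e = entry_get(peer_ifindex);

	if (!e) {		/* miss: create the shared resource */
		e = calloc(1, sizeof(*e));
		if (!e)
			abort();
		e->peer_ifindex = peer_ifindex;
		e->next = entries;
		entries = e;
		printf("create pair for ifindex %d\n", peer_ifindex);
	}
	e->nflows++;
	return e;
}

static void flow_detach(struct entry *e)
{
	if (--e->nflows)
		return;
	/* last flow gone: release the shared resource (unlink omitted) */
	printf("destroy pair for ifindex %d\n", e->peer_ifindex);
}

int main(void)
{
	struct entry *a = flow_attach(7);
	struct entry *b = flow_attach(7);	/* reuses the same entry */

	flow_detach(a);
	flow_detach(b);		/* second detach destroys the pair */
	return 0;
}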
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index ab92298eafc3..f292bb346985 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
- if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM))
- mlx5e_rx_am(&c->rq);
+ if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
+ struct net_dim_sample dim_sample;
+ net_dim_sample(c->rq.cq.event_ctr,
+ c->rq.stats.packets,
+ c->rq.stats.bytes,
+ &dim_sample);
+ net_dim(&c->rq.dim, dim_sample);
+ }
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
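Note that the napi path above hands net_dim_sample() the raw cumulative counters (CQ events, packets, bytes); the generic library stores a start sample internally and computes wrap-safe deltas itself, which is why the driver no longer keeps any per-interval bookkeeping of its own.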
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7649e36653d9..5ecf2cddc16d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "fs_core.h"
#define UPLINK_VPORT 0xFFFF
@@ -1123,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->ingress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
u8 *smac_v;
@@ -1188,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->ingress.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
@@ -1210,8 +1224,12 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->egress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
@@ -1262,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach egress drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->egress.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
@@ -1457,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
}
}
+static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
+ vport->ingress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->ingress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure ingress drop rule counter failed\n",
+ vport->vport);
+ vport->ingress.drop_counter = NULL;
+ }
+ }
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
+ vport->egress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->egress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure egress drop rule counter failed\n",
+ vport->vport);
+ vport->egress.drop_counter = NULL;
+ }
+ }
+}
+
+static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (vport->ingress.drop_counter)
+ mlx5_fc_destroy(dev, vport->ingress.drop_counter);
+ if (vport->egress.drop_counter)
+ mlx5_fc_destroy(dev, vport->egress.drop_counter);
+}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1483,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
+ /* create steering drop counters for ingress and egress ACLs */
+ if (vport_num && esw->mode == SRIOV_LEGACY)
+ esw_vport_create_drop_counters(vport);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1521,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_destroy_drop_counters(vport);
}
esw->enabled_vports--;
mutex_unlock(&esw->state_lock);
@@ -2016,12 +2083,36 @@ unlock:
return err;
}
+static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ int vport_idx,
+ struct mlx5_vport_drop_stats *stats)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ u64 bytes = 0;
+ u16 idx = 0;
+
+ if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+ return;
+
+ if (vport->egress.drop_counter) {
+ idx = vport->egress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
+ }
+
+ if (vport->ingress.drop_counter) {
+ idx = vport->ingress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
+ }
+}
+
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
@@ -2076,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ vf_stats->rx_dropped = stats.rx_dropped;
+ vf_stats->tx_dropped = stats.tx_dropped;
+
free_out:
kvfree(out);
return err;
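The direction swap in mlx5_eswitch_query_vport_drop_stats() above is deliberate: vport ingress and egress are named from the eswitch's point of view, so packets dropped by the ingress ACL were sent by the VF (reported as tx_dropped) while packets dropped by the egress ACL were on their way to the VF (reported as rx_dropped).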
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3b481182f13a..2fa037066b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -73,6 +73,7 @@ struct vport_ingress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
};
struct vport_egress {
@@ -81,6 +82,12 @@ struct vport_egress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+};
+
+struct mlx5_vport_drop_stats {
+ u64 rx_dropped;
+ u64 tx_dropped;
};
struct mlx5_vport_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 3e571045626f..05262708f14b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 89d1f8650033..b7ab929d5f8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
}
}
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ return mlx5_cmd_fc_query(dev, id, packets, bytes);
+}
+
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 6f338a9219c8..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
.get_drvinfo = mlx5i_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ts_info = mlx5i_get_ts_info,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 8812d7208e8f..3b2363e93ba5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -41,7 +41,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
@@ -396,7 +395,7 @@ int mlx5i_dev_init(struct net_device *dev)
return 0;
}
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 49008022c306..6d9053bcbe95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
/* Get the net-device corresponding to the given underlay QPN */
struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
-/* Shared ndo functionts */
+/* Shared ndo functions */
int mlx5i_dev_init(struct net_device *dev);
void mlx5i_dev_cleanup(struct net_device *dev);
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
void mlx5i_init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 531b02cc979b..b69e9d847a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev);
static int mlx5i_pkey_dev_init(struct net_device *dev);
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
@@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_init = mlx5i_pkey_dev_init,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
+ .ndo_do_ioctl = mlx5i_pkey_ioctl,
};
/* Child NDOs */
@@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
return mlx5i_dev_init(dev);
}
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return mlx5i_ioctl(dev, ifr, cmd);
+}
+
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
return mlx5i_dev_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 5e128d7a9ffd..a09ebbaf3b68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -398,3 +398,187 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
+
+static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(rqc, rqc, hairpin, 1);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+
+ return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
+}
+
+static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(sqc, sqc, hairpin, 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+
+ return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
+}
+
+static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
+ struct mlx5_hairpin_params *params)
+{
+ int err;
+
+ err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn);
+ if (err)
+ goto out_err_rq;
+
+ err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn);
+ if (err)
+ goto out_err_sq;
+
+ return 0;
+
+out_err_sq:
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
+out_err_rq:
+ return err;
+}
+
+static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
+{
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn);
+}
+
+static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_sq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ void *rqc;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
+ MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ return mlx5_core_modify_rq(func_mdev, rqn,
+ in, MLX5_ST_SZ_BYTES(modify_rq_in));
+}
+
+static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_rq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
+ MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ return mlx5_core_modify_sq(peer_mdev, sqn,
+ in, MLX5_ST_SZ_BYTES(modify_sq_in));
+}
+
+static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
+{
+ int err;
+
+ /* set peer SQ */
+ err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn,
+ MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn);
+ if (err)
+ goto err_modify_sq;
+
+ /* set func RQ */
+ err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn,
+ MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn);
+
+ if (err)
+ goto err_modify_rq;
+
+ return 0;
+
+err_modify_rq:
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+err_modify_sq:
+ return err;
+}
+
+static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+{
+ /* unset func RQ */
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn, MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
+
+ /* unset peer SQ */
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params)
+{
+ struct mlx5_hairpin *hp;
+ int size, err;
+
+ size = sizeof(*hp);
+ hp = kzalloc(size, GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ hp->func_mdev = func_mdev;
+ hp->peer_mdev = peer_mdev;
+
+ /* alloc and pair func --> peer hairpin */
+ err = mlx5_hairpin_create_queues(hp, params);
+ if (err)
+ goto err_create_queues;
+
+ err = mlx5_hairpin_pair_queues(hp);
+ if (err)
+ goto err_pair_queues;
+
+ return hp;
+
+err_pair_queues:
+ mlx5_hairpin_destroy_queues(hp);
+err_create_queues:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+{
+ mlx5_hairpin_unpair_queues(hp);
+ mlx5_hairpin_destroy_queues(hp);
+ kfree(hp);
+}
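mlx5_hairpin_pair_queues() above is a strict two-step handshake: the peer-side SQ goes RST to RDY first, carrying the function-side vhca_id and RQ number, then the function-side RQ goes RST to RDY pointing back at the SQ; unpairing and the error path reverse that order. A compact sketch of the shape, with stand-ins for the firmware commands:

#include <stdio.h>

/* stand-ins for the firmware commands; 0 on success */
static int sq_set_ready(int fail) { puts("SQ: RST -> RDY"); return fail; }
static int rq_set_ready(int fail) { puts("RQ: RST -> RDY"); return fail; }
static void sq_set_reset(void)    { puts("SQ: RDY -> RST"); }
static void rq_set_reset(void)    { puts("RQ: RDY -> RST"); }

static int pair_queues(int fail_rq)
{
	int err;

	err = sq_set_ready(0);		/* peer SQ first */
	if (err)
		goto err_sq;
	err = rq_set_ready(fail_rq);	/* then the function RQ */
	if (err)
		goto err_rq;
	return 0;

err_rq:
	sq_set_reset();			/* unwind only what succeeded */
err_sq:
	return err;
}

static void unpair_queues(void)
{
	rq_set_reset();			/* reverse order of pairing */
	sq_set_reset();
}

int main(void)
{
	if (!pair_queues(0))
		unpair_queues();
	pair_queues(1);			/* exercise the error unwind */
	return 0;
}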
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d373df7b11bd..54c7d9202e81 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -358,7 +358,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
+ dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
rev->major, rev->minor, rev->subminor);
dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
MLXSW_SP_FW_FILENAME);
@@ -3085,6 +3085,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_fids_init;
}
+ err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_qdiscs_init;
+ }
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3113,6 +3120,8 @@ err_register_netdev:
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+err_port_qdiscs_init:
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3148,6 +3157,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -4422,7 +4432,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
}
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev)) {
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
NL_SET_ERR_MSG(extack,
"spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
@@ -4550,6 +4563,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
@@ -4566,7 +4580,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
}
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev)) {
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a0adcd886589..b6f475e83474 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -204,29 +204,6 @@ struct mlxsw_sp_port_vlan {
struct list_head bridge_vlan_node;
};
-enum mlxsw_sp_qdisc_type {
- MLXSW_SP_QDISC_NO_QDISC,
- MLXSW_SP_QDISC_RED,
-};
-
-struct mlxsw_sp_qdisc {
- u32 handle;
- enum mlxsw_sp_qdisc_type type;
- struct red_stats xstats_base;
- union {
- struct {
- u64 tail_drop_base;
- u64 ecn_base;
- u64 wred_drop_base;
- } red;
- } xstats;
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 drops;
- u64 overlimits;
-};
-
/* No need for an internal lock; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
u64 ecn;
@@ -269,7 +246,7 @@ struct mlxsw_sp_port {
} periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
- struct mlxsw_sp_qdisc root_qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc;
unsigned acl_rule_count;
};
@@ -366,6 +343,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev);
/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -582,6 +561,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
/* spectrum_qdisc.c */
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac5def0..273300b75a68 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -41,6 +41,138 @@
#include "spectrum.h"
#include "reg.h"
+enum mlxsw_sp_qdisc_type {
+ MLXSW_SP_QDISC_NO_QDISC,
+ MLXSW_SP_QDISC_RED,
+};
+
+struct mlxsw_sp_qdisc_ops {
+ enum mlxsw_sp_qdisc_type type;
+ int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params);
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr);
+ int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr);
+ void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+};
+
+struct mlxsw_sp_qdisc {
+ u32 handle;
+ u8 tclass_num;
+ union {
+ struct red_stats red;
+ } xstats_base;
+ struct mlxsw_sp_qdisc_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 drops;
+ u64 overlimits;
+ } stats_base;
+
+ struct mlxsw_sp_qdisc_ops *ops;
+};
+
+static bool
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
+ enum mlxsw_sp_qdisc_type type)
+{
+ return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->type == type &&
+ mlxsw_sp_qdisc->handle == handle;
+}
+
+static int
+mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int err = 0;
+
+ if (!mlxsw_sp_qdisc)
+ return 0;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+ err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ int err;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+ /* If this location already held a qdisc of the same type, its
+ * configuration can simply be overridden. Otherwise, the old
+ * qdisc must be removed before setting up the new one.
+ */
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_bad_param;
+
+ err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_config;
+
+ if (mlxsw_sp_qdisc->handle != handle) {
+ mlxsw_sp_qdisc->ops = ops;
+ if (ops->clean_stats)
+ ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+ }
+
+ mlxsw_sp_qdisc->handle = handle;
+ return 0;
+
+err_bad_param:
+err_config:
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_stats)
+ return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ stats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_xstats)
+ return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ xstats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
@@ -79,80 +211,76 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
}
static void
-mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
+ struct red_stats *red_base;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+ red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
- mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
- switch (mlxsw_sp_qdisc->type) {
- case MLXSW_SP_QDISC_RED:
- xstats_base->prob_mark = xstats->ecn;
- xstats_base->prob_drop = xstats->wred_drop[tclass_num];
- xstats_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->prob_mark = xstats->ecn;
+ red_base->prob_drop = xstats->wred_drop[tclass_num];
+ red_base->pdrop = xstats->tail_drop[tclass_num];
- mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
- xstats_base->prob_mark;
- mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
- xstats_base->pdrop;
- break;
- default:
- break;
- }
+ stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
+ stats_base->drops = red_base->prob_drop + red_base->pdrop;
}
static int
-mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int err;
-
- if (mlxsw_sp_qdisc->handle != handle)
- return 0;
-
- err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
- mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
-
- return err;
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
+ mlxsw_sp_qdisc->tclass_num);
}
static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_params *p)
+mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u32 min, max;
- u64 prob;
- int err = 0;
+ struct tc_red_qopt_offload_params *p = params;
if (p->min > p->max) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: min %u is bigger then max %u\n", p->min,
p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: max value %u is too big\n", p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->min == 0 || p->max == 0) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: 0 value is illegal for min and max\n");
- goto err_bad_param;
+ return -EINVAL;
}
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct tc_red_qopt_offload_params *p = params;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ u32 min, max;
+ u64 prob;
/* calculate probability in percentage */
prob = p->probability;
@@ -161,116 +289,132 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
prob = DIV_ROUND_UP(prob, 1 << 16);
min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
- err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
- max, prob, p->is_ecn);
- if (err)
- goto err_config;
-
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
- if (mlxsw_sp_qdisc->handle != handle)
- mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
- mlxsw_sp_qdisc,
- tclass_num);
-
- mlxsw_sp_qdisc->handle = handle;
- return 0;
-
-err_bad_param:
- err = -EINVAL;
-err_config:
- mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
- mlxsw_sp_qdisc, tclass_num);
- return err;
+ return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+ max, prob, p->is_ecn);
}
static int
-mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num, struct red_stats *res)
+ void *xstats_ptr)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
-
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
+ struct red_stats *res = xstats_ptr;
+ int early_drops, marks, pdrops;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
- res->prob_mark = xstats->ecn - xstats_base->prob_mark;
- res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->ecn - xstats_base->prob_mark;
+ pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+
+ res->pdrop += pdrops;
+ res->prob_drop += early_drops;
+ res->prob_mark += marks;
+
+ xstats_base->pdrop += pdrops;
+ xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
static int
-mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_stats *res)
+ struct tc_qopt_offload_stats *stats_ptr)
{
u64 tx_bytes, tx_packets, overlimits, drops;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
-
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
- tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
- tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
- mlxsw_sp_qdisc->overlimits;
+ stats_base->overlimits;
drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
- mlxsw_sp_qdisc->drops;
-
- _bstats_update(res->bstats, tx_bytes, tx_packets);
- res->qstats->overlimits += overlimits;
- res->qstats->drops += drops;
- res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- xstats->backlog[tclass_num]);
-
- mlxsw_sp_qdisc->drops += drops;
- mlxsw_sp_qdisc->overlimits += overlimits;
- mlxsw_sp_qdisc->tx_bytes += tx_bytes;
- mlxsw_sp_qdisc->tx_packets += tx_packets;
+ stats_base->drops;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->overlimits += overlimits;
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ xstats->backlog[tclass_num]);
+
+ stats_base->drops += drops;
+ stats_base->overlimits += overlimits;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
return 0;
}
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
+ .type = MLXSW_SP_QDISC_RED,
+ .check_params = mlxsw_sp_qdisc_red_check_params,
+ .replace = mlxsw_sp_qdisc_red_replace,
+ .destroy = mlxsw_sp_qdisc_red_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_red_stats,
+ .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+};
+
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- int tclass_num;
if (p->parent != TC_H_ROOT)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
- tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (p->command == TC_RED_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_red,
+ &p->set);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_RED))
+ return -EOPNOTSUPP;
switch (p->command) {
- case TC_RED_REPLACE:
- return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->set);
case TC_RED_DESTROY:
- return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num);
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
case TC_RED_XSTATS:
- return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- p->xstats);
+ return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->xstats);
case TC_RED_STATS:
- return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->stats);
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
default:
return -EOPNOTSUPP;
}
}
+
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->root_qdisc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+ return 0;
+}
+
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->root_qdisc);
+}
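The spectrum_qdisc.c rework above replaces the per-type switch statements with a small ops table: each offloadable qdisc type supplies check_params/replace/destroy/stats callbacks, and generic wrappers dispatch through whichever ops the location currently holds. A standalone model of that dispatch pattern follows; every name in it is illustrative, not taken from the driver.

/* Standalone model of the ops-table dispatch above; compiles with any
 * C compiler. All names are illustrative, not the driver's. */
#include <stdio.h>

struct qdisc;

struct qdisc_ops {
	int type;
	int (*check_params)(struct qdisc *q, void *params);
	int (*replace)(struct qdisc *q, void *params);
	void (*destroy)(struct qdisc *q);
};

struct qdisc {
	unsigned int handle;
	const struct qdisc_ops *ops;
};

/* Generic replace: destroy a qdisc of a different type first, then
 * validate and apply the new configuration. */
static int qdisc_replace(struct qdisc *q, unsigned int handle,
			 const struct qdisc_ops *ops, void *params)
{
	int err;

	if (q->ops && q->ops->type != ops->type)
		q->ops->destroy(q);

	err = ops->check_params(q, params);
	if (!err)
		err = ops->replace(q, params);
	if (err) {
		q->ops = NULL;
		return err;
	}

	q->ops = ops;
	q->handle = handle;
	return 0;
}

static int red_check(struct qdisc *q, void *p) { return 0; }
static int red_replace(struct qdisc *q, void *p) { puts("red configured"); return 0; }
static void red_destroy(struct qdisc *q) { puts("red destroyed"); }

static const struct qdisc_ops red_ops = {
	.type = 1, .check_params = red_check,
	.replace = red_replace, .destroy = red_destroy,
};

int main(void)
{
	struct qdisc root = { 0, NULL };

	return qdisc_replace(&root, 0x10000, &red_ops, NULL);
}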
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index be657b8533f0..434b3922b34f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3228,7 +3228,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
{
if (!removing)
nh->should_offload = 1;
- else if (nh->offloaded)
+ else
nh->should_offload = 0;
nh->update = 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b8548e25ae7..593ad31be749 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
return NULL;
}
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev)
+{
+ return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index a79b9f814844..21e15cb2f62e 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -793,7 +793,7 @@ struct fe_priv {
/* rx specific fields.
* Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
- union ring_type get_rx, put_rx, first_rx, last_rx;
+ union ring_type get_rx, put_rx, last_rx;
struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
struct nv_skb_map *rx_skb;
@@ -1812,7 +1812,7 @@ static int nv_alloc_rx(struct net_device *dev)
struct ring_desc *less_rx;
less_rx = np->get_rx.orig;
- if (less_rx-- == np->first_rx.orig)
+ if (less_rx-- == np->rx_ring.orig)
less_rx = np->last_rx.orig;
while (np->put_rx.orig != less_rx) {
@@ -1833,7 +1833,7 @@ static int nv_alloc_rx(struct net_device *dev)
wmb();
np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
- np->put_rx.orig = np->first_rx.orig;
+ np->put_rx.orig = np->rx_ring.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
} else {
@@ -1853,7 +1853,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
struct ring_desc_ex *less_rx;
less_rx = np->get_rx.ex;
- if (less_rx-- == np->first_rx.ex)
+ if (less_rx-- == np->rx_ring.ex)
less_rx = np->last_rx.ex;
while (np->put_rx.ex != less_rx) {
@@ -1875,7 +1875,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
wmb();
np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
- np->put_rx.ex = np->first_rx.ex;
+ np->put_rx.ex = np->rx_ring.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
} else {
@@ -1903,7 +1903,8 @@ static void nv_init_rx(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
int i;
- np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+ np->get_rx = np->rx_ring;
+ np->put_rx = np->rx_ring;
if (!nv_optimized(np))
np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
@@ -2911,7 +2912,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
- np->get_rx.orig = np->first_rx.orig;
+ np->get_rx.orig = np->rx_ring.orig;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
np->get_rx_ctx = np->first_rx_ctx;
@@ -3000,7 +3001,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
next_pkt:
if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
- np->get_rx.ex = np->first_rx.ex;
+ np->get_rx.ex = np->rx_ring.ex;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
np->get_rx_ctx = np->first_rx_ctx;
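The forcedeth hunks above drop the cached first_rx pointer: it always pointed at the start of rx_ring, so the wrap can use the ring base directly. The wrap idiom itself is untouched; a self-contained model of it, with element type and ring size as placeholders rather than driver values:

/* Self-contained model of the descriptor-ring wrap above. */
#include <stdio.h>

#define RING_SIZE 4

struct desc { unsigned int flaglen; };

static struct desc rx_ring[RING_SIZE];
static struct desc *get_rx = rx_ring;
static struct desc *const last_rx = &rx_ring[RING_SIZE - 1];

/* Post-increment, then wrap to the ring base (rx_ring) rather than a
 * separately cached "first" pointer. */
static struct desc *next_rx(void)
{
	if (get_rx++ == last_rx)
		get_rx = rx_ring;
	return get_rx;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("slot %ld\n", (long)(next_rx() - rx_ring));
	return 0;
}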
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index cedacddf50fb..7e7704daf5f1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -143,7 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- int ingress_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
+ u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -185,11 +185,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct ifla_vlan_flags *flags;
flags = nla_data(data[IFLA_VLAN_FLAGS]);
- ingress_format = flags->flags & flags->mask;
+ data_format = flags->flags & flags->mask;
}
- netdev_dbg(dev, "data format [ingress 0x%08X]\n", ingress_format);
- port->ingress_data_format = ingress_format;
+ netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+ port->data_format = data_format;
return 0;
@@ -353,7 +353,7 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
struct ifla_vlan_flags *flags;
flags = nla_data(data[IFLA_VLAN_FLAGS]);
- port->ingress_data_format = flags->flags & flags->mask;
+ port->data_format = flags->flags & flags->mask;
}
return 0;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 2ea9fe326571..00e4634100d3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -32,7 +32,7 @@ struct rmnet_endpoint {
*/
struct rmnet_port {
struct net_device *dev;
- u32 ingress_data_format;
+ u32 data_format;
u8 nr_rmnet_devs;
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 05539321ba3a..601edec28c5f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
+#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
@@ -65,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_endpoint *ep;
+ u16 len, pad;
u8 mux_id;
- u16 len;
if (RMNET_MAP_GET_CD_BIT(skb)) {
- if (port->ingress_data_format
- & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
mux_id = RMNET_MAP_GET_MUX_ID(skb);
- len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
+ pad = RMNET_MAP_GET_PAD(skb);
+ len = RMNET_MAP_GET_LENGTH(skb) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
@@ -90,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
/* Subtract MAP header */
skb_pull(skb, sizeof(struct rmnet_map_header));
- skb_trim(skb, len);
rmnet_set_skb_proto(skb);
+
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+ if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ skb_trim(skb, len);
rmnet_deliver_skb(skb);
return;
@@ -114,8 +121,8 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
skb_push(skb, ETH_HLEN);
}
- if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
- while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
+ if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+ while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
__rmnet_map_ingress_handler(skbn, port);
consume_skb(skb);
@@ -134,19 +141,24 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);
+ if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
+ additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+ required_headroom += additional_header_len;
+ }
+
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
goto fail;
}
+ if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+ rmnet_map_checksum_uplink_packet(skb, orig_dev);
+
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
if (!map_header)
goto fail;
- if (mux_id == 0xff)
- map_header->mux_id = 0;
- else
- map_header->mux_id = mux_id;
+ map_header->mux_id = mux_id;
skb->protocol = htons(ETH_P_MAP);
@@ -208,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
struct rmnet_priv *priv;
u8 mux_id;
+ sk_pacing_shift_update(skb->sk, 8);
+
orig_dev = skb->dev;
priv = netdev_priv(orig_dev);
skb->dev = priv->real_dev;
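The sk_pacing_shift_update(skb->sk, 8) call above lowers the socket's pacing shift from its default of 10, which raises the amount of data TCP small queues will leave in flight (the TSQ byte limit is roughly sk_pacing_rate >> sk_pacing_shift, per the logic in tcp_output.c), giving the MAP layer more packets to aggregate. A worked-arithmetic sketch with made-up rate and truesize values:

/* Worked example: how lowering the pacing shift raises the TCP
 * small-queues byte limit. Rate and truesize are made up. */
#include <stdio.h>

static unsigned long tsq_limit(unsigned long pacing_rate, /* bytes/sec */
			       unsigned int pacing_shift,
			       unsigned long skb_truesize)
{
	unsigned long limit = pacing_rate >> pacing_shift;
	unsigned long floor = 2 * skb_truesize;

	return limit > floor ? limit : floor;
}

int main(void)
{
	unsigned long rate = 125000000UL;	/* ~1 Gbit/s */

	printf("shift 10 (default): %lu bytes\n", tsq_limit(rate, 10, 4096));
	printf("shift 8 (rmnet):    %lu bytes\n", tsq_limit(rate, 8, 4096));
	return 0;
}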
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 4df359de28c5..6ce31e29136d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -47,6 +47,22 @@ struct rmnet_map_header {
u16 pkt_len;
} __aligned(1);
+struct rmnet_map_dl_csum_trailer {
+ u8 reserved1;
+ u8 valid:1;
+ u8 reserved2:7;
+ u16 csum_start_offset;
+ u16 csum_length;
+ __be16 csum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_csum_header {
+ __be16 csum_start_offset;
+ u16 csum_insert_offset:14;
+ u16 udp_ip4_ind:1;
+ u16 csum_enabled:1;
+} __aligned(1);
+
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
@@ -67,10 +83,13 @@ struct rmnet_map_header {
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
-u8 rmnet_map_demultiplex(struct sk_buff *skb);
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev);
#endif /* _RMNET_MAP_H_ */
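The two checksum structures above bracket the MAP payload: downlink frames carry the trailer after the payload and its padding (both covered by the MAP header's pkt_len), while uplink frames carry the checksum header between the MAP header and the IP packet. A sketch of locating the downlink trailer once the MAP header has been pulled; the types here are simplified stand-ins, not the driver's structs:

/* Sketch: finding the downlink checksum trailer in a MAP frame laid
 * out as [map_header][payload][pad][dl_csum_trailer], where pkt_len
 * covers payload + pad. */
#include <stdint.h>

struct dl_csum_trailer {
	uint8_t  reserved1;
	uint8_t  valid_and_reserved2;	/* valid:1, reserved:7 */
	uint16_t csum_start_offset;
	uint16_t csum_length;
	uint16_t csum_value;		/* big endian on the wire */
};

static const struct dl_csum_trailer *
map_dl_trailer(const uint8_t *after_map_hdr, uint16_t pkt_len)
{
	/* The trailer starts pkt_len bytes past the MAP header. */
	return (const struct dl_csum_trailer *)(after_map_hdr + pkt_len);
}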
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 51e604923ac1..6bc328fb88e1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
}
static void rmnet_map_send_ack(struct sk_buff *skb,
- unsigned char type)
+ unsigned char type,
+ struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
int xmit_status;
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+ if (skb->len < sizeof(struct rmnet_map_header) +
+ RMNET_MAP_GET_LENGTH(skb) +
+ sizeof(struct rmnet_map_dl_csum_trailer)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_trim(skb, skb->len -
+ sizeof(struct rmnet_map_dl_csum_trailer));
+ }
+
skb->protocol = htons(ETH_P_MAP);
cmd = RMNET_MAP_GET_CMD_START(skb);
@@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
- rmnet_map_send_ack(skb, rc);
+ rmnet_map_send_ack(skb, rc, port);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 86b8c758f94e..c74a6c56d315 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -14,6 +14,9 @@
*/
#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
@@ -21,6 +24,233 @@
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+ const void *txporthdr)
+{
+ __sum16 *check = NULL;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ check = &(((struct tcphdr *)txporthdr)->check);
+ break;
+
+ case IPPROTO_UDP:
+ check = &(((struct udphdr *)txporthdr)->check);
+ break;
+
+ default:
+ check = NULL;
+ break;
+ }
+
+ return check;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+ __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+ u16 csum_value, csum_value_final;
+ struct iphdr *ip4h;
+ void *txporthdr;
+ __be16 addend;
+
+ ip4h = (struct iphdr *)(skb->data);
+ if ((ntohs(ip4h->frag_off) & IP_MF) ||
+ ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+ return -EOPNOTSUPP;
+
+ txporthdr = skb->data + ip4h->ihl * 4;
+
+ csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+ if (!csum_field)
+ return -EPROTONOSUPPORT;
+
+ /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+ if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+ return 0;
+
+ csum_value = ~ntohs(csum_trailer->csum_value);
+ hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+ ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+ (__force __be16)hdr_csum);
+
+ pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+ ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+ ip4h->protocol, 0);
+ addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+ pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+ addend = (__force __be16)ntohs((__force __be16)*csum_field);
+ csum_temp = ~csum16_sub(pseudo_csum, addend);
+ csum_value_final = (__force u16)csum_temp;
+
+ if (unlikely(csum_value_final == 0)) {
+ switch (ip4h->protocol) {
+ case IPPROTO_UDP:
+ /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+ csum_value_final = ~csum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ /* DL4 Non-RFC compliant TCP checksum found */
+ if (*csum_field == (__force __sum16)0xFFFF)
+ csum_value_final = ~csum_value_final;
+ break;
+ }
+ }
+
+ if (csum_value_final == ntohs((__force __be16)*csum_field))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+ __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+ u16 csum_value, csum_value_final;
+ __be16 ip6_hdr_csum, addend;
+ struct ipv6hdr *ip6h;
+ void *txporthdr;
+ u32 length;
+
+ ip6h = (struct ipv6hdr *)(skb->data);
+
+ txporthdr = skb->data + sizeof(struct ipv6hdr);
+ csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+ if (!csum_field)
+ return -EPROTONOSUPPORT;
+
+ csum_value = ~ntohs(csum_trailer->csum_value);
+ ip6_hdr_csum = (__force __be16)
+ ~ntohs((__force __be16)ip_compute_csum(ip6h,
+ (int)(txporthdr - (void *)(skb->data))));
+ ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+ ip6_hdr_csum);
+
+ length = (ip6h->nexthdr == IPPROTO_UDP) ?
+ ntohs(((struct udphdr *)txporthdr)->len) :
+ ntohs(ip6h->payload_len);
+ pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ length, ip6h->nexthdr, 0));
+ addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+ pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+ addend = (__force __be16)ntohs((__force __be16)*csum_field);
+ csum_temp = ~csum16_sub(pseudo_csum, addend);
+ csum_value_final = (__force u16)csum_temp;
+
+ if (unlikely(csum_value_final == 0)) {
+ switch (ip6h->nexthdr) {
+ case IPPROTO_UDP:
+ /* RFC 2460 section 8.1
+ * DL6 One's complement rule for UDP checksum 0
+ */
+ csum_value_final = ~csum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ /* DL6 Non-RFC compliant TCP checksum found */
+ if (*csum_field == (__force __sum16)0xFFFF)
+ csum_value_final = ~csum_value_final;
+ break;
+ }
+ }
+
+ if (csum_value_final == ntohs((__force __be16)*csum_field))
+ return 0;
+ else
+ return -EINVAL;
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = iphdr + ip4h->ihl * 4;
+
+ if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ __be16 *hdr = (__be16 *)ul_header, offset;
+
+ offset = htons((__force u16)(skb_transport_header(skb) -
+ (unsigned char *)iphdr));
+ ul_header->csum_start_offset = offset;
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+ if (ip4h->protocol == IPPROTO_UDP)
+ ul_header->udp_ip4_ind = 1;
+ else
+ ul_header->udp_ip4_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+ *hdr = htons((__force u16)*hdr);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+ if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ __be16 *hdr = (__be16 *)ul_header, offset;
+
+ offset = htons((__force u16)(skb_transport_header(skb) -
+ (unsigned char *)ip6hdr));
+ ul_header->csum_start_offset = offset;
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+ ul_header->udp_ip4_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+ *hdr = htons((__force u16)*hdr);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
u32 padding, map_datalen;
u8 *padbytes;
- if (skb_headroom(skb) < sizeof(struct rmnet_map_header))
- return NULL;
-
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
skb_push(skb, sizeof(struct rmnet_map_header));
@@ -69,7 +296,8 @@ done:
* returned, indicating that there are no more packets to deaggregate. Caller
* is responsible for freeing the original skb.
*/
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port)
{
struct rmnet_map_header *maph;
struct sk_buff *skbn;
@@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
maph = (struct rmnet_map_header *)skb->data;
packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
+ packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
return skbn;
}
+
+/* Validates packet checksums. Takes a pointer to the beginning of
+ * a buffer which contains the IP packet + padding + checksum
+ * trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+ struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return -EOPNOTSUPP;
+
+ csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+ if (!csum_trailer->valid)
+ return -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+#if IS_ENABLED(CONFIG_IPV6)
+ return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+#else
+ return -EPROTONOSUPPORT;
+#endif
+
+ return 0;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev)
+{
+ struct rmnet_map_ul_csum_header *ul_header;
+ void *iphdr;
+
+ ul_header = (struct rmnet_map_ul_csum_header *)
+ skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+ if (unlikely(!(orig_dev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+ goto sw_csum;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ iphdr = (char *)ul_header +
+ sizeof(struct rmnet_map_ul_csum_header);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+ return;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+ rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+ return;
+#else
+ goto sw_csum;
+#endif
+ }
+ }
+
+sw_csum:
+ ul_header->csum_start_offset = 0;
+ ul_header->csum_insert_offset = 0;
+ ul_header->csum_enabled = 0;
+ ul_header->udp_ip4_ind = 0;
+}
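The downlink validation above runs entirely in 16-bit one's-complement arithmetic: the hardware reports a checksum over the whole IP packet, the driver folds out the IP header sum, folds in the pseudo-header, and compares the result against the transport checksum field. csum16_add()/csum16_sub() are add/subtract with end-around carry; a standalone model of their semantics (not the kernel's implementation):

/* Standalone model of 16-bit one's-complement checksum add/sub with
 * end-around carry, mirroring csum16_add()/csum16_sub() semantics. */
#include <stdint.h>
#include <stdio.h>

static uint16_t csum16_add(uint16_t a, uint16_t b)
{
	uint32_t sum = (uint32_t)a + b;

	return (uint16_t)(sum + (sum >> 16));	/* fold the carry back in */
}

static uint16_t csum16_sub(uint16_t a, uint16_t b)
{
	return csum16_add(a, (uint16_t)~b);	/* subtract = add complement */
}

int main(void)
{
	/* Removing a known component from a packet-wide sum leaves the
	 * sum over the remaining bytes, which is what the driver does
	 * with the IP header checksum. */
	uint16_t whole = csum16_add(0x1234, 0xabcd);

	printf("0x%04x\n", csum16_sub(whole, 0xabcd));	/* prints 0x1234 */
	return 0;
}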
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index d21428078504..de0143eaa05a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -21,6 +21,8 @@
/* Constants */
#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(0)
#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(1)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(2)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(3)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 5bb29f44d114..570a227acdd8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -188,6 +188,10 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
if (rmnet_get_endpoint(port, id))
return -EBUSY;
+ rmnet_dev->hw_features = NETIF_F_RXCSUM;
+ rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ rmnet_dev->hw_features |= NETIF_F_SG;
+
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e7ab23e87de2..81045dfa1cd8 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -748,8 +748,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss > MSSMask) {
- WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
- mss);
+ netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
+ mss);
goto out_dma_error;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 857f67bebc6a..272c5962e4f7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1675,33 +1675,24 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
}
}
-static void __rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr, bool pm)
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
{
if (tp->link_ok(ioaddr)) {
rtl_link_chg_patch(tp);
/* This is to cancel a scheduled suspend if there's one. */
- if (pm)
- pm_request_resume(&tp->pci_dev->dev);
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
if (net_ratelimit())
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
- if (pm)
- pm_schedule_suspend(&tp->pci_dev->dev, 5000);
+ pm_runtime_idle(&tp->pci_dev->dev);
}
}
-static void rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr)
-{
- __rtl8169_check_link_status(dev, tp, ioaddr, false);
-}
-
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -7746,7 +7737,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
rtl8169_pcierr_interrupt(dev);
if (status & LinkChg)
- __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
rtl_irq_enable_all(tp);
}
@@ -7959,7 +7950,7 @@ static int rtl_open(struct net_device *dev)
rtl_unlock_work(tp);
tp->saved_wolopts = 0;
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
rtl8169_check_link_status(dev, tp, ioaddr);
out:
@@ -8103,8 +8094,10 @@ static int rtl8169_runtime_suspend(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray)
+ if (!tp->TxDescArray) {
+ rtl_pll_power_down(tp);
return 0;
+ }
rtl_lock_work(tp);
tp->saved_wolopts = __rtl8169_get_wol(tp);
@@ -8146,9 +8139,11 @@ static int rtl8169_runtime_idle(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
- return tp->TxDescArray ? -EBUSY : 0;
+ if (!netif_running(dev) || !netif_carrier_ok(dev))
+ pm_schedule_suspend(device, 10000);
+
+ return -EBUSY;
}
static const struct dev_pm_ops rtl8169_pm_ops = {
@@ -8195,9 +8190,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- struct device *d = &pdev->dev;
-
- pm_runtime_get_sync(d);
rtl8169_net_suspend(dev);
@@ -8215,8 +8207,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
pci_wake_from_d3(pdev, true);
pci_set_power_state(pdev, PCI_D3hot);
}
-
- pm_runtime_put_noidle(d);
}
static void rtl_remove_one(struct pci_dev *pdev)
@@ -8701,11 +8691,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl8168_driver_start(tp);
}
- if (pci_dev_run_wake(pdev))
- pm_runtime_put_noidle(&pdev->dev);
-
netif_carrier_off(dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
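With the r8169 changes above, link-down now feeds into runtime PM via pm_runtime_idle(), and the .runtime_idle callback decides policy: it schedules a delayed suspend only when the device is unusable (interface down or no carrier) and returns -EBUSY so the PM core never suspends synchronously from the idle path. A hedged skeleton of that callback shape; the PM and netdev helpers are real kernel APIs, but the surrounding driver context is omitted and dev_get_drvdata() stands in for pci_get_drvdata():

/* Skeleton of the runtime-idle policy above, stripped of driver
 * specifics. */
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>

static int example_runtime_idle(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);

	if (!netif_running(dev) || !netif_carrier_ok(dev))
		pm_schedule_suspend(device, 10000);	/* 10 s grace period */

	return -EBUSY;	/* suspend only via the scheduled request */
}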
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d47bbbb22e7c..7aa1c12750b3 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
+ [TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
+ [TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@@ -3225,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* ioremap the TSU registers */
if (mdp->cd->tsu) {
struct resource *rtsu;
+
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
- if (IS_ERR(mdp->tsu_addr)) {
- ret = PTR_ERR(mdp->tsu_addr);
+ if (!rtsu) {
+ dev_err(&pdev->dev, "no TSU resource\n");
+ ret = -ENODEV;
+ goto out_release;
+ }
+ /* Only request the TSU region for the first of the two ports
+ * sharing this TSU; a second request for the same region would
+ * make the probe fail.
+ */
+ if (devno % 2 == 0 &&
+ !devm_request_mem_region(&pdev->dev, rtsu->start,
+ resource_size(rtsu),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "can't request TSU resource.\n");
+ ret = -EBUSY;
+ goto out_release;
+ }
+ mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
+ resource_size(rtsu));
+ if (!mdp->tsu_addr) {
+ dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
+ ret = -ENOMEM;
goto out_release;
}
mdp->port = devno % 2;
ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
}
- /* initialize first or needed device */
- if (!devno || pd->needs_init) {
+ /* Need to init only the first port of the two sharing a TSU */
+ if (devno % 2 == 0) {
if (mdp->cd->chip_reset)
mdp->cd->chip_reset(ndev);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3780161de5a1..12f0abc30cb1 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -953,31 +953,42 @@ void efx_link_status_changed(struct efx_nic *efx)
netif_info(efx, link, efx->net_dev, "link down\n");
}
-void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising)
{
- efx->link_advertising = advertising;
- if (advertising) {
- if (advertising & ADVERTISED_Pause)
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
- else
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
- if (advertising & ADVERTISED_Asym_Pause)
- efx->wanted_fc ^= EFX_FC_TX;
- }
+ memcpy(efx->link_advertising, advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ efx->link_advertising[0] |= ADVERTISED_Autoneg;
+ if (advertising[0] & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising[0] & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+}
+
+/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+ bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
efx->wanted_fc = wanted_fc;
- if (efx->link_advertising) {
+ if (efx->link_advertising[0]) {
if (wanted_fc & EFX_FC_RX)
- efx->link_advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
else
- efx->link_advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
if (wanted_fc & EFX_FC_TX)
- efx->link_advertising ^= ADVERTISED_Asym_Pause;
+ efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
}
}
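The sfc conversion above is driven by link modes that no longer fit in a u32 of ADVERTISED_* flags: the new 25/50/100G modes have ETHTOOL_LINK_MODE_* bit numbers at or beyond 31, so the driver switches to the ethtool link-mode bitmap helpers. A short sketch of the idiom, with the surrounding driver context omitted:

/* Sketch of the ethtool link-mode bitmap idiom adopted above. */
#include <linux/ethtool.h>
#include <linux/bitmap.h>

static void example_fill_advertising(unsigned long *advertising)
{
	bitmap_zero(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
	__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising);
	/* Modes like 50000baseCR2 cannot be expressed in a u32 mask. */
	__set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, advertising);
}

static void example_caller(void)
{
	/* Callers declare the mask with the standard macro. */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);

	example_fill_advertising(advertising);
}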
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 16da3e9a6000..0cddc5ad77b1 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -258,7 +258,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
}
void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
static inline void efx_device_detach_sync(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 3747b5644110..4db2dc2bf52f 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -720,7 +720,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
goto out;
}
- if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+ if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
netif_dbg(efx, drv, efx->net_dev,
"Autonegotiation is disabled\n");
rc = -EINVAL;
@@ -732,10 +732,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
(wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
efx->type->prepare_enable_fc_tx(efx);
- old_adv = efx->link_advertising;
+ old_adv = efx->link_advertising[0];
old_fc = efx->wanted_fc;
efx_link_set_wanted_fc(efx, wanted_fc);
- if (efx->link_advertising != old_adv ||
+ if (efx->link_advertising[0] != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx->phy_op->reconfigure(efx);
if (rc) {
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 65ee1a468170..ce8aabf9091e 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -171,89 +171,108 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
- u32 result = 0;
+ #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+ bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
switch (media) {
case MC_CMD_MEDIA_KX4:
- result |= SUPPORTED_Backplane;
+ SET_BIT(Backplane);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseKX_Full;
+ SET_BIT(1000baseKX_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseKX4_Full;
+ SET_BIT(10000baseKX4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseKR4_Full;
+ SET_BIT(40000baseKR4_Full);
break;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
- result |= SUPPORTED_FIBRE;
+ SET_BIT(FIBRE);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseCR4_Full;
+ SET_BIT(40000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(100000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(25000baseCR_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ SET_BIT(50000baseCR2_Full);
break;
case MC_CMD_MEDIA_BASE_T:
- result |= SUPPORTED_TP;
+ SET_BIT(TP);
if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- result |= SUPPORTED_10baseT_Half;
+ SET_BIT(10baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- result |= SUPPORTED_10baseT_Full;
+ SET_BIT(10baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- result |= SUPPORTED_100baseT_Half;
+ SET_BIT(100baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- result |= SUPPORTED_100baseT_Full;
+ SET_BIT(100baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- result |= SUPPORTED_1000baseT_Half;
+ SET_BIT(1000baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
break;
}
if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- result |= SUPPORTED_Pause;
+ SET_BIT(Pause);
if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- result |= SUPPORTED_Asym_Pause;
+ SET_BIT(Asym_Pause);
if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- result |= SUPPORTED_Autoneg;
+ SET_BIT(Autoneg);
- return result;
+ #undef SET_BIT
}
-static u32 ethtool_to_mcdi_cap(u32 cap)
+static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
u32 result = 0;
- if (cap & SUPPORTED_10baseT_Half)
+ #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ if (TEST_BIT(10baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
- if (cap & SUPPORTED_10baseT_Full)
+ if (TEST_BIT(10baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
- if (cap & SUPPORTED_100baseT_Half)
+ if (TEST_BIT(100baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
- if (cap & SUPPORTED_100baseT_Full)
+ if (TEST_BIT(100baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
- if (cap & SUPPORTED_1000baseT_Half)
+ if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (cap & SUPPORTED_Pause)
+ if (TEST_BIT(100000baseCR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+ if (TEST_BIT(25000baseCR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+ if (TEST_BIT(50000baseCR2_Full))
+ result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+ if (TEST_BIT(Pause))
result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
- if (cap & SUPPORTED_Asym_Pause)
+ if (TEST_BIT(Asym_Pause))
result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
- if (cap & SUPPORTED_Autoneg)
+ if (TEST_BIT(Autoneg))
result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+ #undef TEST_BIT
+
return result;
}
@@ -285,7 +304,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
return flags;
}
-static u32 mcdi_to_ethtool_media(u32 media)
+static u8 mcdi_to_ethtool_media(u32 media)
{
switch (media) {
case MC_CMD_MEDIA_XAUI:
@@ -371,8 +390,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->link_advertising =
- mcdi_to_ethtool_cap(phy_data->media, caps);
+ mcdi_to_ethtool_linkset(phy_data->media, caps,
+ efx->link_advertising);
else
phy_data->forced_cap = caps;
@@ -435,8 +454,8 @@ fail:
int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 caps = (efx->link_advertising ?
- ethtool_to_mcdi_cap(efx->link_advertising) :
+ u32 caps = (efx->link_advertising[0] ?
+ ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
@@ -509,34 +528,28 @@ static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
- u32 supported, advertising, lp_advertising;
- supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
- advertising = efx->link_advertising;
cmd->base.speed = efx->link_state.speed;
cmd->base.duplex = efx->link_state.fd;
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
- cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
cmd->base.mdio_support = (efx->mdio.mode_support &
(MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+ cmd->link_modes.supported);
+ memcpy(cmd->link_modes.advertising, efx->link_advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return;
- lp_advertising =
- mcdi_to_ethtool_cap(phy_cfg->media,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
- lp_advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+ cmd->link_modes.lp_advertising);
}
static int
@@ -546,29 +559,28 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
- u32 advertising;
-
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
if (cmd->base.autoneg) {
- caps = (ethtool_to_mcdi_cap(advertising) |
- 1 << MC_CMD_PHY_CAP_AN_LBN);
+ caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
} else if (cmd->base.duplex) {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
- case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
- case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
+ case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+ case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
+ case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
+ default: return -EINVAL;
}
} else {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
}
}
@@ -578,11 +590,10 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
return rc;
if (cmd->base.autoneg) {
- efx_link_set_advertising(
- efx, advertising | ADVERTISED_Autoneg);
+ efx_link_set_advertising(efx, cmd->link_modes.advertising);
phy_cfg->forced_cap = 0;
} else {
- efx_link_set_advertising(efx, 0);
+ efx_link_clear_advertising(efx);
phy_cfg->forced_cap = caps;
}
return 0;
@@ -985,6 +996,9 @@ static unsigned int efx_mcdi_event_link_speed[] = {
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
};
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 4cedc5c4c6d9..3dd42f3136fe 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -937,7 +937,7 @@ struct efx_nic {
unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
- u32 link_advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
struct efx_link_state link_state;
unsigned int n_link_state_changes;
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
index 3a1829e59e24..6bcfe27fc560 100644
--- a/drivers/net/ethernet/socionext/Kconfig
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -19,4 +19,16 @@ config SNI_AVE
Driver for gigabit ethernet MACs, called AVE, in the
Socionext UniPhier family.
+config SNI_NETSEC
+ tristate "Socionext NETSEC ethernet support"
+ depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF
+ select PHYLIB
+ select MII
+ ---help---
+ Enable this to add support for the SocioNext NetSec Gigabit
+ Ethernet controller + PHY, as found on the Synquacer SC2A11 SoC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called netsec. If unsure, say N.
+
endif #NET_VENDOR_SOCIONEXT
diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile
index ab83df63b670..7fd837a999fd 100644
--- a/drivers/net/ethernet/socionext/Makefile
+++ b/drivers/net/ethernet/socionext/Makefile
@@ -3,3 +3,4 @@
# Makefile for all ethernet ip drivers on Socionext platforms
#
obj-$(CONFIG_SNI_AVE) += sni_ave.o
+obj-$(CONFIG_SNI_NETSEC) += netsec.o
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
new file mode 100644
index 000000000000..6c263af86b8a
--- /dev/null
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/of_mdio.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
+
+#define NETSEC_REG_SOFT_RST 0x104
+#define NETSEC_REG_COM_INIT 0x120
+
+#define NETSEC_REG_TOP_STATUS 0x200
+#define NETSEC_IRQ_RX BIT(1)
+#define NETSEC_IRQ_TX BIT(0)
+
+#define NETSEC_REG_TOP_INTEN 0x204
+#define NETSEC_REG_INTEN_SET 0x234
+#define NETSEC_REG_INTEN_CLR 0x238
+
+#define NETSEC_REG_NRM_TX_STATUS 0x400
+#define NETSEC_REG_NRM_TX_INTEN 0x404
+#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
+#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
+#define NRM_TX_ST_NTOWNR BIT(17)
+#define NRM_TX_ST_TR_ERR BIT(16)
+#define NRM_TX_ST_TXDONE BIT(15)
+#define NRM_TX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_NRM_RX_STATUS 0x440
+#define NETSEC_REG_NRM_RX_INTEN 0x444
+#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
+#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
+#define NRM_RX_ST_RC_ERR BIT(16)
+#define NRM_RX_ST_PKTCNT BIT(15)
+#define NRM_RX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_PKT_CMD_BUF 0xd0
+
+#define NETSEC_REG_CLK_EN 0x100
+
+#define NETSEC_REG_PKT_CTRL 0x140
+
+#define NETSEC_REG_DMA_TMR_CTRL 0x20c
+#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
+#define NETSEC_REG_F_TAIKI_VER 0x230
+#define NETSEC_REG_DMA_HM_CTRL 0x214
+#define NETSEC_REG_DMA_MH_CTRL 0x220
+#define NETSEC_REG_ADDR_DIS_CORE 0x218
+#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
+#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c
+
+#define NETSEC_REG_NRM_TX_PKTCNT 0x410
+
+#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
+#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
+
+#define NETSEC_REG_NRM_TX_TMR 0x41c
+
+#define NETSEC_REG_NRM_RX_PKTCNT 0x454
+#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
+#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
+#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
+
+#define NETSEC_REG_NRM_RX_TMR 0x45c
+
+#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
+#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
+#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
+#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448
+
+#define NETSEC_REG_NRM_TX_CONFIG 0x430
+#define NETSEC_REG_NRM_RX_CONFIG 0x470
+
+#define MAC_REG_STATUS 0x1024
+#define MAC_REG_DATA 0x11c0
+#define MAC_REG_CMD 0x11c4
+#define MAC_REG_FLOW_TH 0x11cc
+#define MAC_REG_INTF_SEL 0x11d4
+#define MAC_REG_DESC_INIT 0x11fc
+#define MAC_REG_DESC_SOFT_RST 0x1204
+#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500
+
+#define GMAC_REG_MCR 0x0000
+#define GMAC_REG_MFFR 0x0004
+#define GMAC_REG_GAR 0x0010
+#define GMAC_REG_GDR 0x0014
+#define GMAC_REG_FCR 0x0018
+#define GMAC_REG_BMR 0x1000
+#define GMAC_REG_RDLAR 0x100c
+#define GMAC_REG_TDLAR 0x1010
+#define GMAC_REG_OMR 0x1018
+
+#define MHZ(n) ((n) * 1000 * 1000)
+
+#define NETSEC_TX_SHIFT_OWN_FIELD 31
+#define NETSEC_TX_SHIFT_LD_FIELD 30
+#define NETSEC_TX_SHIFT_DRID_FIELD 24
+#define NETSEC_TX_SHIFT_PT_FIELD 21
+#define NETSEC_TX_SHIFT_TDRID_FIELD 16
+#define NETSEC_TX_SHIFT_CC_FIELD 15
+#define NETSEC_TX_SHIFT_FS_FIELD 9
+#define NETSEC_TX_LAST 8
+#define NETSEC_TX_SHIFT_CO 7
+#define NETSEC_TX_SHIFT_SO 6
+#define NETSEC_TX_SHIFT_TRS_FIELD 4
+
+#define NETSEC_RX_PKT_OWN_FIELD 31
+#define NETSEC_RX_PKT_LD_FIELD 30
+#define NETSEC_RX_PKT_SDRID_FIELD 24
+#define NETSEC_RX_PKT_FR_FIELD 23
+#define NETSEC_RX_PKT_ER_FIELD 21
+#define NETSEC_RX_PKT_ERR_FIELD 16
+#define NETSEC_RX_PKT_TDRID_FIELD 12
+#define NETSEC_RX_PKT_FS_FIELD 9
+#define NETSEC_RX_PKT_LS_FIELD 8
+#define NETSEC_RX_PKT_CO_FIELD 6
+
+#define NETSEC_RX_PKT_ERR_MASK 3
+
+#define NETSEC_MAX_TX_PKT_LEN 1518
+#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018
+
+#define NETSEC_RING_GMAC 15
+#define NETSEC_RING_MAX 2
+
+#define NETSEC_TCP_SEG_LEN_MAX 1460
+#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960
+
+#define NETSEC_RX_CKSUM_NOTAVAIL 0
+#define NETSEC_RX_CKSUM_OK 1
+#define NETSEC_RX_CKSUM_NG 2
+
+#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
+#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)
+
+#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
+#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)
+
+#define NETSEC_INT_PKTCNT_MAX 2047
+
+#define NETSEC_FLOW_START_TH_MAX 95
+#define NETSEC_FLOW_STOP_TH_MAX 95
+#define NETSEC_FLOW_PAUSE_TIME_MIN 5
+
+#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f
+
+#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
+#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
+#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
+#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)
+
+#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
+#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
+#define NETSEC_CLK_EN_REG_DOM_D BIT(0)
+
+#define NETSEC_COM_INIT_REG_DB BIT(2)
+#define NETSEC_COM_INIT_REG_CLS BIT(1)
+#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
+ NETSEC_COM_INIT_REG_DB)
+
+#define NETSEC_SOFT_RST_REG_RESET 0
+#define NETSEC_SOFT_RST_REG_RUN BIT(31)
+
+#define NETSEC_DMA_CTRL_REG_STOP 1
+#define MH_CTRL__MODE_TRANS BIT(20)
+
+#define NETSEC_GMAC_CMD_ST_READ 0
+#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
+#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)
+
+#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
+#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
+#define NETSEC_GMAC_BMR_REG_SWR 0x00000001
+
+#define NETSEC_GMAC_OMR_REG_ST BIT(13)
+#define NETSEC_GMAC_OMR_REG_SR BIT(1)
+
+#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
+#define NETSEC_GMAC_MCR_REG_CST BIT(25)
+#define NETSEC_GMAC_MCR_REG_JE BIT(20)
+#define NETSEC_MCR_PS BIT(15)
+#define NETSEC_GMAC_MCR_REG_FES BIT(14)
+#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
+#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c
+
+#define NETSEC_FCR_RFE BIT(2)
+#define NETSEC_FCR_TFE BIT(1)
+
+#define NETSEC_GMAC_GAR_REG_GW BIT(1)
+#define NETSEC_GMAC_GAR_REG_GB BIT(0)
+
+#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
+#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
+#define GMAC_REG_SHIFT_CR_GAR 2
+
+#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
+#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
+#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
+#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
+#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
+#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
+
+#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
+#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000
+
+#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000
+
+#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
+#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
+#define NETSEC_REG_DESC_TMR_MODE 4
+#define NETSEC_REG_DESC_ENDIAN 0
+
+#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
+#define NETSEC_MAC_DESC_INIT_REG_INIT 1
+
+#define NETSEC_EEPROM_MAC_ADDRESS 0x00
+#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
+#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
+#define NETSEC_EEPROM_HM_ME_SIZE 0x10
+#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
+#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
+#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
+#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
+#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
+
+#define DESC_NUM 128
+#define NAPI_BUDGET (DESC_NUM / 2)
+
+#define DESC_SZ sizeof(struct netsec_de)
+
+#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
+
+enum ring_id {
+ NETSEC_RING_TX = 0,
+ NETSEC_RING_RX
+};
+
+struct netsec_desc {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *addr;
+ u16 len;
+};
+
+struct netsec_desc_ring {
+ phys_addr_t desc_phys;
+ struct netsec_desc *desc;
+ void *vaddr;
+ u16 pkt_cnt;
+ u16 head, tail;
+};
+
+struct netsec_priv {
+ struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
+ struct ethtool_coalesce et_coalesce;
+ spinlock_t reglock; /* protect reg access */
+ struct napi_struct napi;
+ phy_interface_t phy_interface;
+ struct net_device *ndev;
+ struct device_node *phy_np;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ void __iomem *ioaddr;
+ void __iomem *eeprom_base;
+ struct device *dev;
+ struct clk *clk;
+ u32 msg_enable;
+ u32 freq;
+ bool rx_cksum_offload_flag;
+};
+
+struct netsec_de { /* Netsec Descriptor layout */
+ u32 attr;
+ u32 data_buf_addr_up;
+ u32 data_buf_addr_lw;
+ u32 buf_len_info;
+};
+
+struct netsec_tx_pkt_ctrl {
+ u16 tcp_seg_len;
+ bool tcp_seg_offload_flag;
+ bool cksum_offload_flag;
+};
+
+struct netsec_rx_pkt_info {
+ int rx_cksum_result;
+ int err_code;
+ bool err_flag;
+};
+
+static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
+{
+ writel(val, priv->ioaddr + reg_addr);
+}
+
+static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
+{
+ return readl(priv->ioaddr + reg_addr);
+}
+
+/************* MDIO BUS OPS FOLLOW *************/
+
+#define TIMEOUT_SPINS_MAC 1000
+#define TIMEOUT_SECONDARY_MS_MAC 100
+
+static u32 netsec_clk_type(u32 freq)
+{
+ if (freq < MHZ(35))
+ return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
+ if (freq < MHZ(60))
+ return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
+ if (freq < MHZ(100))
+ return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
+ if (freq < MHZ(150))
+ return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
+ if (freq < MHZ(250))
+ return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
+
+ return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
+}
+
+static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
+{
+ u32 timeout = TIMEOUT_SPINS_MAC;
+
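+	/* Busy-poll briefly first, then fall back to a slower
+	 * sleeping poll before declaring a timeout.
+	 */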
+ while (--timeout && netsec_read(priv, addr) & mask)
+ cpu_relax();
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ while (--timeout && netsec_read(priv, addr) & mask)
+ usleep_range(1000, 2000);
+
+ if (timeout)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
+{
+ netsec_write(priv, MAC_REG_DATA, value);
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
+ return netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+}
+
+static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
+{
+ int ret;
+
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
+ ret = netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+ if (ret)
+ return ret;
+
+ *read = netsec_read(priv, MAC_REG_DATA);
+
+ return 0;
+}
+
+static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
+ u32 addr, u32 mask)
+{
+ u32 timeout = TIMEOUT_SPINS_MAC;
+	u32 data;
+	int ret;
+
+ do {
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ do {
+ usleep_range(1000, 2000);
+
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout && !ret)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+
+ value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
+
+ if (phydev->speed != SPEED_1000)
+ value |= NETSEC_MCR_PS;
+
+ if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
+ phydev->speed == SPEED_100)
+ value |= NETSEC_GMAC_MCR_REG_FES;
+
+ value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface))
+ value |= NETSEC_GMAC_MCR_REG_IBN;
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_phy_write(struct mii_bus *bus,
+ int phy_addr, int reg, u16 val)
+{
+ struct netsec_priv *priv = bus->priv;
+
+ if (netsec_mac_write(priv, GMAC_REG_GDR, val))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_GAR,
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+}
+
+static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
+{
+ struct netsec_priv *priv = bus->priv;
+ u32 data;
+ int ret;
+
+ if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
+ if (ret)
+ return ret;
+
+ return data;
+}
+
+/************* ETHTOOL_OPS FOLLOW *************/
+
+static void netsec_et_get_drvinfo(struct net_device *net_device,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "netsec", sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int netsec_et_get_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ *et_coalesce = priv->et_coalesce;
+
+ return 0;
+}
+
+static int netsec_et_set_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ priv->et_coalesce = *et_coalesce;
+
+ if (priv->et_coalesce.tx_coalesce_usecs < 50)
+ priv->et_coalesce.tx_coalesce_usecs = 50;
+ if (priv->et_coalesce.tx_max_coalesced_frames < 1)
+ priv->et_coalesce.tx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
+ priv->et_coalesce.tx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
+ priv->et_coalesce.tx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
+
+ if (priv->et_coalesce.rx_coalesce_usecs < 50)
+ priv->et_coalesce.rx_coalesce_usecs = 50;
+ if (priv->et_coalesce.rx_max_coalesced_frames < 1)
+ priv->et_coalesce.rx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
+ priv->et_coalesce.rx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
+ priv->et_coalesce.rx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
+
+ return 0;
+}
+
+static u32 netsec_et_get_msglevel(struct net_device *dev)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = datum;
+}
+
+static const struct ethtool_ops netsec_ethtool_ops = {
+ .get_drvinfo = netsec_et_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = netsec_et_get_coalesce,
+ .set_coalesce = netsec_et_set_coalesce,
+ .get_msglevel = netsec_et_get_msglevel,
+ .set_msglevel = netsec_et_set_msglevel,
+};
+
+/************* NETDEV_OPS FOLLOW *************/
+
+static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
+ struct netsec_desc *desc)
+{
+ struct sk_buff *skb;
+
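+	/* On non-coherent DMA platforms the receive buffer must stay
+	 * cache-line aligned, so skip the usual NET_IP_ALIGN reservation
+	 * there.
+	 */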
+ if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
+ skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
+ } else {
+ desc->len = L1_CACHE_ALIGN(desc->len);
+ skb = netdev_alloc_skb(priv->ndev, desc->len);
+ }
+ if (!skb)
+ return NULL;
+
+ desc->addr = skb->data;
+ desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, desc->dma_addr)) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ return skb;
+}
+
+static void netsec_set_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring, u16 idx,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
+ u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
+ (1 << NETSEC_RX_PKT_FS_FIELD) |
+ (1 << NETSEC_RX_PKT_LS_FIELD);
+
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx].dma_addr = desc->dma_addr;
+ dring->desc[idx].addr = desc->addr;
+ dring->desc[idx].len = desc->len;
+ dring->desc[idx].skb = skb;
+}
+
+static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ u16 idx,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc, u16 *len)
+{
+ struct netsec_de de = {};
+
+ memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
+
+ *len = de.buf_len_info >> 16;
+
+ rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+ rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
+ rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
+ NETSEC_RX_PKT_ERR_MASK;
+ *desc = dring->desc[idx];
+ return desc->skb;
+}
+
+static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc,
+ u16 *len)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct sk_buff *tmp_skb, *skb = NULL;
+ struct netsec_desc td;
+ int tail;
+
+ *rxpi = (struct netsec_rx_pkt_info){};
+
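+	/* 22 bytes of headroom over the MTU, presumably for the Ethernet
+	 * header (14), a VLAN tag (4) and the FCS (4).
+	 */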
+ td.len = priv->ndev->mtu + 22;
+
+ tmp_skb = netsec_alloc_skb(priv, &td);
+
+ dma_rmb();
+
+ tail = dring->tail;
+
+ if (!tmp_skb) {
+ netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
+ dring->desc[tail].skb);
+ } else {
+ skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
+ netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
+ }
+
+ /* move tail ahead */
+ dring->tail = (dring->tail + 1) % DESC_NUM;
+
+ dring->pkt_cnt--;
+
+ return skb;
+}
+
+static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ unsigned int pkts, bytes;
+
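+	/* Reading the DONE counter clears it, so accumulate into pkt_cnt;
+	 * completions beyond this poll's budget are drained on later passes.
+	 */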
+ dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+ if (dring->pkt_cnt < budget)
+ budget = dring->pkt_cnt;
+
+ pkts = 0;
+ bytes = 0;
+
+ while (pkts < budget) {
+ struct netsec_desc *desc;
+ struct netsec_de *entry;
+ int tail, eop;
+
+ tail = dring->tail;
+
+ /* move tail ahead */
+ dring->tail = (tail + 1) % DESC_NUM;
+
+ desc = &dring->desc[tail];
+ entry = dring->vaddr + DESC_SZ * tail;
+
+ eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ DMA_TO_DEVICE);
+ if (eop) {
+ pkts++;
+ bytes += desc->skb->len;
+ dev_kfree_skb(desc->skb);
+ }
+ *desc = (struct netsec_desc){};
+ }
+ dring->pkt_cnt -= budget;
+
+ priv->ndev->stats.tx_packets += budget;
+ priv->ndev->stats.tx_bytes += bytes;
+
+ netdev_completed_queue(priv->ndev, budget, bytes);
+
+ return budget;
+}
+
+static int netsec_process_tx(struct netsec_priv *priv, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ int new, done = 0;
+
+ do {
+ new = netsec_clean_tx_dring(priv, budget);
+ done += new;
+ budget -= new;
+ } while (new);
+
+ if (done && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ return done;
+}
+
+static int netsec_process_rx(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct net_device *ndev = priv->ndev;
+ struct netsec_rx_pkt_info rx_info;
+ int done = 0, rx_num = 0;
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ u16 len;
+
+ while (done < budget) {
+ if (!rx_num) {
+ rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
+ dring->pkt_cnt += rx_num;
+
+			/* advance head by 'rx_num' */
+ dring->head = (dring->head + rx_num) % DESC_NUM;
+
+ rx_num = dring->pkt_cnt;
+ if (!rx_num)
+ break;
+ }
+ done++;
+ rx_num--;
+ skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
+ if (unlikely(!skb) || rx_info.err_flag) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: rx fail err(%d)\n",
+ __func__, rx_info.err_code);
+ ndev->stats.rx_dropped++;
+ continue;
+ }
+
+ dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, priv->ndev);
+
+ if (priv->rx_cksum_offload_flag &&
+ rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+ }
+
+ return done;
+}
+
+static int netsec_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct netsec_priv *priv;
+ struct net_device *ndev;
+ int tx, rx, done, todo;
+
+ priv = container_of(napi, struct netsec_priv, napi);
+ ndev = priv->ndev;
+
+ todo = budget;
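+	/* Alternate between Tx completion and Rx processing until the
+	 * budget is exhausted or neither makes progress.
+	 */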
+ do {
+ if (!todo)
+ break;
+
+ tx = netsec_process_tx(priv, todo);
+ todo -= tx;
+
+ if (!todo)
+ break;
+
+ rx = netsec_process_rx(priv, todo);
+ todo -= rx;
+ } while (rx || tx);
+
+ done = budget - todo;
+
+ if (done < budget && napi_complete_done(napi, done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_SET,
+ NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+ }
+
+ return done;
+}
+
+static void netsec_set_tx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ const struct netsec_tx_pkt_ctrl *tx_ctrl,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ int idx = dring->head;
+ struct netsec_de *de;
+ u32 attr;
+
+ de = dring->vaddr + (DESC_SZ * idx);
+
+ attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
+ (1 << NETSEC_TX_SHIFT_PT_FIELD) |
+ (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
+ (1 << NETSEC_TX_SHIFT_FS_FIELD) |
+ (1 << NETSEC_TX_LAST) |
+ (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
+ (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
+ (1 << NETSEC_TX_SHIFT_TRS_FIELD);
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx] = *desc;
+ dring->desc[idx].skb = skb;
+
+ /* move head ahead */
+ dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ struct netsec_tx_pkt_ctrl tx_ctrl = {};
+ struct netsec_desc tx_desc;
+ u16 tso_seg_len = 0;
+ int filled;
+
+	/* differentiate between a full and an empty ring */
+ if (dring->head >= dring->tail)
+ filled = dring->head - dring->tail;
+ else
+ filled = dring->head + DESC_NUM - dring->tail;
+
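+	/* With free-running head/tail indices a completely full ring would
+	 * look empty (head == tail), so stop while at least two slots are
+	 * still free.
+	 */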
+ if (DESC_NUM - filled < 2) { /* if less than 2 available */
+ netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_ctrl.cksum_offload_flag = true;
+
+ if (skb_is_gso(skb))
+ tso_seg_len = skb_shinfo(skb)->gso_size;
+
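+	/* For TSO, zero the IP length fields and seed the TCP checksum with
+	 * the pseudo-header checksum so the hardware can finish each segment.
+	 */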
+ if (tso_seg_len > 0) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = 0;
+ tcp_hdr(skb)->check =
+ ~tcp_v4_check(0, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0);
+ } else {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ tx_ctrl.tcp_seg_offload_flag = true;
+ tx_ctrl.tcp_seg_len = tso_seg_len;
+ }
+
+ tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: DMA mapping failed\n", __func__);
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ tx_desc.addr = skb->data;
+ tx_desc.len = skb_headlen(skb);
+
+ skb_tx_timestamp(skb);
+ netdev_sent_queue(priv->ndev, skb->len);
+
+ netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* tell HW one more desc is ready */
+
+ return NETDEV_TX_OK;
+}
+
+static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ struct netsec_desc *desc;
+ u16 idx;
+
+ if (!dring->vaddr || !dring->desc)
+ return;
+
+ for (idx = 0; idx < DESC_NUM; idx++) {
+ desc = &dring->desc[idx];
+ if (!desc->addr)
+ continue;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ dev_kfree_skb(desc->skb);
+ }
+
+ memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
+ memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
+
+ dring->head = 0;
+ dring->tail = 0;
+ dring->pkt_cnt = 0;
+}
+
+static void netsec_free_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+
+ if (dring->vaddr) {
+ dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ dring->vaddr, dring->desc_phys);
+ dring->vaddr = NULL;
+ }
+
+ kfree(dring->desc);
+ dring->desc = NULL;
+}
+
+static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ int ret = 0;
+
+ dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ &dring->desc_phys, GFP_KERNEL);
+ if (!dring->vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL);
+ if (!dring->desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+err:
+ netsec_free_dring(priv, id);
+
+ return ret;
+}
+
+static int netsec_setup_rx_dring(struct netsec_priv *priv)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ int n;
+
+ desc.len = priv->ndev->mtu + 22;
+
+ for (n = 0; n < DESC_NUM; n++) {
+ skb = netsec_alloc_skb(priv, &desc);
+ if (!skb) {
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+ return -ENOMEM;
+ }
+ netsec_set_rx_de(priv, dring, n, &desc, skb);
+ }
+
+ return 0;
+}
+
+static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
+ u32 addr_h, u32 addr_l, u32 size)
+{
+ u64 base = (u64)addr_h << 32 | addr_l;
+ void __iomem *ucode;
+ u32 i;
+
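+	/* Map the microcode image and stream it word by word into the
+	 * engine's command-buffer register.
+	 */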
+ ucode = ioremap(base, size * sizeof(u32));
+ if (!ucode)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++)
+ netsec_write(priv, reg, readl(ucode + i * 4));
+
+ iounmap(ucode);
+ return 0;
+}
+
+static int netsec_netdev_load_microcode(struct netsec_priv *priv)
+{
+ u32 addr_h, addr_l, size;
+ int err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = 0;
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int netsec_reset_hardware(struct netsec_priv *priv)
+{
+ u32 value;
+ int err;
+
+ /* stop DMA engines */
+ if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
+ netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+
+ while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+
+ while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+ }
+
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
+ netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
+
+ while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
+ cpu_relax();
+
+ /* set desc_start addr */
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_phys));
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_phys));
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_phys));
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_phys));
+
+ /* set normal tx dring ring config */
+ netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+ netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+
+ err = netsec_netdev_load_microcode(priv);
+ if (err) {
+ netif_err(priv, probe, priv->ndev,
+ "%s: failed to load microcode (%d)\n", __func__, err);
+ return err;
+ }
+
+ /* start DMA engines */
+ netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
+ netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
+
+ usleep_range(1000, 2000);
+
+ if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
+ netif_err(priv, probe, priv->ndev,
+ "microengine start failed\n");
+ return -ENXIO;
+ }
+ netsec_write(priv, NETSEC_REG_TOP_STATUS,
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
+
+ value = NETSEC_PKT_CTRL_REG_MODE_NRM;
+ if (priv->ndev->mtu > ETH_DATA_LEN)
+ value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
+
+ /* change to normal mode */
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
+ netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
+
+ while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
+ NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
+ cpu_relax();
+
+ /* clear any pending EMPTY/ERR irq status */
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
+
+ /* Disable TX & RX intr */
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+
+ return 0;
+}
+
+static int netsec_start_gmac(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+ int ret;
+
+ if (phydev->speed != SPEED_1000)
+ value = (NETSEC_GMAC_MCR_REG_CST |
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_RESET))
+ return -ETIMEDOUT;
+
+	/* Wait for the soft reset to complete */
+ usleep_range(1000, 5000);
+
+ ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
+ if (ret)
+ return ret;
+ if (value & NETSEC_GMAC_BMR_REG_SWR)
+ return -EAGAIN;
+
+ netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
+ return -ETIMEDOUT;
+
+ netsec_write(priv, MAC_REG_DESC_INIT, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
+ return -ETIMEDOUT;
+
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_RDLAR,
+ NETSEC_GMAC_RDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_TDLAR,
+ NETSEC_GMAC_TDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_update_to_phy_state(priv);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+
+ value |= NETSEC_GMAC_OMR_REG_SR;
+ value |= NETSEC_GMAC_OMR_REG_ST;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
+
+ if (netsec_mac_write(priv, GMAC_REG_OMR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_stop_gmac(struct netsec_priv *priv)
+{
+ u32 value;
+ int ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+ value &= ~NETSEC_GMAC_OMR_REG_SR;
+ value &= ~NETSEC_GMAC_OMR_REG_ST;
+
+ /* disable all interrupts */
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ return netsec_mac_write(priv, GMAC_REG_OMR, value);
+}
+
+static void netsec_phy_adjust_link(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ if (ndev->phydev->link)
+ netsec_start_gmac(priv);
+ else
+ netsec_stop_gmac(priv);
+
+ phy_print_status(ndev->phydev);
+}
+
+static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
+{
+ struct netsec_priv *priv = dev_id;
+ u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
+ unsigned long flags;
+
+ /* Disable interrupts */
+ if (status & NETSEC_IRQ_TX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
+ }
+ if (status & NETSEC_IRQ_RX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int netsec_netdev_open(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ pm_runtime_get_sync(priv->dev);
+
+ ret = netsec_setup_rx_dring(priv);
+ if (ret) {
+ netif_err(priv, probe, priv->ndev,
+			  "%s: failed to set up the RX ring\n", __func__);
+ goto err1;
+ }
+
+ ret = request_irq(priv->ndev->irq, netsec_irq_handler,
+ IRQF_SHARED, "netsec", priv);
+ if (ret) {
+ netif_err(priv, drv, priv->ndev, "request_irq failed\n");
+ goto err2;
+ }
+
+ if (dev_of_node(priv->dev)) {
+ if (!of_phy_connect(priv->ndev, priv->phy_np,
+ netsec_phy_adjust_link, 0,
+ priv->phy_interface)) {
+ netif_err(priv, link, priv->ndev, "missing PHY\n");
+ ret = -ENODEV;
+ goto err3;
+ }
+ } else {
+ ret = phy_connect_direct(priv->ndev, priv->phydev,
+ netsec_phy_adjust_link,
+ priv->phy_interface);
+ if (ret) {
+ netif_err(priv, link, priv->ndev,
+ "phy_connect_direct() failed (%d)\n", ret);
+ goto err3;
+ }
+ }
+
+ phy_start(ndev->phydev);
+
+ netsec_start_gmac(priv);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ /* Enable RX intr. */
+ netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX);
+
+ return 0;
+err3:
+ free_irq(priv->ndev->irq, priv);
+err2:
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+err1:
+ pm_runtime_put_sync(priv->dev);
+ return ret;
+}
+
+static int netsec_netdev_stop(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+
+ napi_disable(&priv->napi);
+
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+ netsec_stop_gmac(priv);
+
+ free_irq(priv->ndev->irq, priv);
+
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+
+ pm_runtime_put_sync(priv->dev);
+
+ return 0;
+}
+
+static int netsec_netdev_init(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
+ if (ret)
+ return ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
+ if (ret)
+ goto err1;
+
+ ret = netsec_reset_hardware(priv);
+ if (ret)
+ goto err2;
+
+ return 0;
+err2:
+ netsec_free_dring(priv, NETSEC_RING_RX);
+err1:
+ netsec_free_dring(priv, NETSEC_RING_TX);
+ return ret;
+}
+
+static void netsec_netdev_uninit(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netsec_free_dring(priv, NETSEC_RING_RX);
+ netsec_free_dring(priv, NETSEC_RING_TX);
+}
+
+static int netsec_netdev_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
+
+ return 0;
+}
+
+static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
+ int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops netsec_netdev_ops = {
+ .ndo_init = netsec_netdev_init,
+ .ndo_uninit = netsec_netdev_uninit,
+ .ndo_open = netsec_netdev_open,
+ .ndo_stop = netsec_netdev_stop,
+ .ndo_start_xmit = netsec_netdev_start_xmit,
+ .ndo_set_features = netsec_netdev_set_features,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = netsec_netdev_ioctl,
+};
+
+static int netsec_of_probe(struct platform_device *pdev,
+ struct netsec_priv *priv)
+{
+ priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_np) {
+ dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+ return -EINVAL;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk not found\n");
+ return PTR_ERR(priv->clk);
+ }
+ priv->freq = clk_get_rate(priv->clk);
+
+ return 0;
+}
+
+static int netsec_acpi_probe(struct platform_device *pdev,
+ struct netsec_priv *priv, u32 *phy_addr)
+{
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ACPI))
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "missing required property 'phy-channel'\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(&pdev->dev,
+ "socionext,phy-clock-frequency",
+ &priv->freq);
+ if (ret)
+ dev_err(&pdev->dev,
+ "missing required property 'socionext,phy-clock-frequency'\n");
+ return ret;
+}
+
+static void netsec_unregister_mdio(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (!dev_of_node(priv->dev) && phydev) {
+ phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
+
+ mdiobus_unregister(priv->mii_bus);
+}
+
+static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
+{
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
+ bus->priv = priv;
+ bus->name = "SNI NETSEC MDIO";
+ bus->read = netsec_phy_read;
+ bus->write = netsec_phy_write;
+ bus->parent = priv->dev;
+ priv->mii_bus = bus;
+
+ if (dev_of_node(priv->dev)) {
+ struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
+
+ mdio_node = of_get_child_by_name(parent, "mdio");
+ if (mdio_node) {
+ parent = mdio_node;
+ } else {
+			/* older firmware doesn't populate the mdio
+			 * subnode; register on the parent node so the
+			 * firmware can be upgraded in due time.
+			 */
+ dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
+ }
+
+ ret = of_mdiobus_register(bus, parent);
+ of_node_put(mdio_node);
+
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+ } else {
+ /* Mask out all PHYs from auto probing. */
+ bus->phy_mask = ~0;
+ ret = mdiobus_register(bus);
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+
+ priv->phydev = get_phy_device(bus, phy_addr, false);
+ if (IS_ERR(priv->phydev)) {
+ ret = PTR_ERR(priv->phydev);
+ dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
+ priv->phydev = NULL;
+ return -ENODEV;
+ }
+
+ ret = phy_device_register(priv->phydev);
+ if (ret) {
+ mdiobus_unregister(bus);
+ dev_err(priv->dev,
+ "phy_device_register err(%d)\n", ret);
+ }
+ }
+
+ return ret;
+}
+
+static int netsec_probe(struct platform_device *pdev)
+{
+ struct resource *mmio_res, *eeprom_res, *irq_res;
+ u8 *mac, macbuf[ETH_ALEN];
+ struct netsec_priv *priv;
+ u32 hw_ver, phy_addr = 0;
+ struct net_device *ndev;
+ int ret;
+
+ mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mmio_res) {
+ dev_err(&pdev->dev, "No MMIO resource found.\n");
+ return -ENODEV;
+ }
+
+ eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!eeprom_res) {
+ dev_info(&pdev->dev, "No EEPROM resource found.\n");
+ return -ENODEV;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ dev_err(&pdev->dev, "No IRQ resource found.\n");
+ return -ENODEV;
+ }
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ spin_lock_init(&priv->reglock);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ platform_set_drvdata(pdev, priv);
+ ndev->irq = irq_res->start;
+ priv->dev = &pdev->dev;
+ priv->ndev = ndev;
+
+ priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
+ NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+ priv->phy_interface = device_get_phy_mode(&pdev->dev);
+ if (priv->phy_interface < 0) {
+ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+ priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
+ resource_size(mmio_res));
+ if (!priv->ioaddr) {
+ dev_err(&pdev->dev, "devm_ioremap() failed\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
+ resource_size(eeprom_res));
+ if (!priv->eeprom_base) {
+ dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
+ if (mac)
+ ether_addr_copy(ndev->dev_addr, mac);
+
+ if (priv->eeprom_base &&
+ (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+ void __iomem *macp = priv->eeprom_base +
+ NETSEC_EEPROM_MAC_ADDRESS;
+
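+		/* The EEPROM stores the MAC address byte-swapped within
+		 * 32-bit words, hence the reversed read offsets.
+		 */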
+ ndev->dev_addr[0] = readb(macp + 3);
+ ndev->dev_addr[1] = readb(macp + 2);
+ ndev->dev_addr[2] = readb(macp + 1);
+ ndev->dev_addr[3] = readb(macp + 0);
+ ndev->dev_addr[4] = readb(macp + 7);
+ ndev->dev_addr[5] = readb(macp + 6);
+ }
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev, "No MAC address found, using random\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ if (dev_of_node(&pdev->dev))
+ ret = netsec_of_probe(pdev, priv);
+ else
+ ret = netsec_acpi_probe(pdev, priv, &phy_addr);
+ if (ret)
+ goto free_ndev;
+
+ if (!priv->freq) {
+ dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+ /* default for throughput */
+ priv->et_coalesce.rx_coalesce_usecs = 500;
+ priv->et_coalesce.rx_max_coalesced_frames = 8;
+ priv->et_coalesce.tx_coalesce_usecs = 500;
+ priv->et_coalesce.tx_max_coalesced_frames = 8;
+
+ ret = device_property_read_u32(&pdev->dev, "max-frame-size",
+ &ndev->max_mtu);
+ if (ret < 0)
+ ndev->max_mtu = ETH_DATA_LEN;
+
+	/* hold a runtime PM reference just for probe; open/close take their own */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
+ /* this driver only supports F_TAIKI style NETSEC */
+ if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
+ NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
+ ret = -ENODEV;
+ goto pm_disable;
+ }
+
+ dev_info(&pdev->dev, "hardware revision %d.%d\n",
+ hw_ver >> 16, hw_ver & 0xffff);
+
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+
+ ndev->netdev_ops = &netsec_netdev_ops;
+ ndev->ethtool_ops = &netsec_ethtool_ops;
+
+ ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->hw_features = ndev->features;
+
+ priv->rx_cksum_offload_flag = true;
+
+ ret = netsec_register_mdio(priv, phy_addr);
+ if (ret)
+ goto unreg_napi;
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+ dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ netif_err(priv, probe, ndev, "register_netdev() failed\n");
+ goto unreg_mii;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+
+unreg_mii:
+ netsec_unregister_mdio(priv);
+unreg_napi:
+ netif_napi_del(&priv->napi);
+pm_disable:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+free_ndev:
+ free_netdev(ndev);
+ dev_err(&pdev->dev, "init failed\n");
+
+ return ret;
+}
+
+static int netsec_remove(struct platform_device *pdev)
+{
+ struct netsec_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+
+ netsec_unregister_mdio(priv);
+
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int netsec_runtime_suspend(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, 0);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int netsec_runtime_resume(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ clk_prepare_enable(priv->clk);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
+ NETSEC_CLK_EN_REG_DOM_C |
+ NETSEC_CLK_EN_REG_DOM_G);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops netsec_pm_ops = {
+ SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
+};
+
+static const struct of_device_id netsec_dt_ids[] = {
+ { .compatible = "socionext,synquacer-netsec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, netsec_dt_ids);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id netsec_acpi_ids[] = {
+ { "SCX0001" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
+#endif
+
+static struct platform_driver netsec_driver = {
+ .probe = netsec_probe,
+ .remove = netsec_remove,
+ .driver = {
+ .name = "netsec",
+ .pm = &netsec_pm_ops,
+ .of_match_table = netsec_dt_ids,
+ .acpi_match_table = ACPI_PTR(netsec_acpi_ids),
+ },
+};
+module_platform_driver(netsec_driver);
+
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("NETSEC Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d9c98fd810bb..f99f14c35063 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
bool stmmac_eee_init(struct stmmac_priv *priv)
{
struct net_device *ndev = priv->dev;
+ int interface = priv->plat->interface;
unsigned long flags;
bool ret = false;
+ if ((interface != PHY_INTERFACE_MODE_MII) &&
+ (interface != PHY_INTERFACE_MODE_GMII) &&
+ !phy_interface_mode_is_rgmii(interface))
+ goto out;
+
/* Using PCS we cannot deal with the phy registers at this stage
 * so we do not support extra features like EEE.
*/
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 667c44f68dbc..195e0d0add8d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);
+ if (skb_dst(skb)) {
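+		/* Headroom: outer IPv4 header, UDP + GENEVE base header,
+		 * GENEVE options and the 14-byte inner Ethernet header.
+		 */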
+ int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
+ GENEVE_BASE_HLEN - info->options_len - 14;
+
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ }
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
+ GENEVE_BASE_HLEN - info->options_len - 14;
+
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ }
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 1d025ab9568f..f522715c6595 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -393,7 +393,12 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
-#define DEFAULT_SAK_LEN 16
+#define MACSEC_GCM_AES_128_SAK_LEN 16
+#define MACSEC_GCM_AES_256_SAK_LEN 32
+
+#define MAX_SAK_LEN MACSEC_GCM_AES_256_SAK_LEN
+
+#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
@@ -1600,7 +1605,7 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
.len = MACSEC_KEYID_LEN, },
[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
- .len = MACSEC_MAX_KEY_LEN, },
+ .len = MAX_SAK_LEN, },
};
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
@@ -2362,15 +2367,26 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
+ u64 csid;
if (!secy_nest)
return 1;
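+	/* Report the cipher suite implied by the configured key length. */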
+ switch (secy->key_len) {
+ case MACSEC_GCM_AES_128_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_128;
+ break;
+ case MACSEC_GCM_AES_256_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ break;
+ default:
+ goto cancel;
+ }
+
if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
MACSEC_SECY_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID,
- MACSEC_SECY_ATTR_PAD) ||
+ csid, MACSEC_SECY_ATTR_PAD) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -3015,8 +3031,8 @@ static void macsec_setup(struct net_device *dev)
eth_zero_addr(dev->broadcast);
}
-static void macsec_changelink_common(struct net_device *dev,
- struct nlattr *data[])
+static int macsec_changelink_common(struct net_device *dev,
+ struct nlattr *data[])
{
struct macsec_secy *secy;
struct macsec_tx_sc *tx_sc;
@@ -3056,6 +3072,22 @@ static void macsec_changelink_common(struct net_device *dev,
if (data[IFLA_MACSEC_VALIDATION])
secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
+
+ if (data[IFLA_MACSEC_CIPHER_SUITE]) {
+ switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_DEFAULT_CIPHER_ALT:
+ secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+ break;
+ case MACSEC_CIPHER_ID_GCM_AES_256:
+ secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3071,9 +3103,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
data[IFLA_MACSEC_PORT])
return -EINVAL;
- macsec_changelink_common(dev, data);
-
- return 0;
+ return macsec_changelink_common(dev, data);
}
static void macsec_del_dev(struct macsec_dev *macsec)
@@ -3270,8 +3300,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err)
goto unlink;
- if (data)
- macsec_changelink_common(dev, data);
+ if (data) {
+ err = macsec_changelink_common(dev, data);
+ if (err)
+ goto del_dev;
+ }
err = register_macsec_dev(real_dev, dev);
if (err < 0)
@@ -3320,7 +3353,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
}
switch (csid) {
- case MACSEC_DEFAULT_CIPHER_ID:
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_CIPHER_ID_GCM_AES_256:
case MACSEC_DEFAULT_CIPHER_ALT:
if (icv_len < MACSEC_MIN_ICV_LEN ||
icv_len > MACSEC_STD_ICV_LEN)
@@ -3390,12 +3424,24 @@ static int macsec_fill_info(struct sk_buff *skb,
{
struct macsec_secy *secy = &macsec_priv(dev)->secy;
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ u64 csid;
+
+ switch (secy->key_len) {
+ case MACSEC_GCM_AES_128_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_128;
+ break;
+ case MACSEC_GCM_AES_256_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ break;
+ default:
+ goto nla_put_failure;
+ }
if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
+ csid, IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a178c5efd33e..a0f2be81d52e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return 0;
unregister_netdev:
+ /* macvlan_uninit would free the macvlan port */
unregister_netdevice(dev);
+ return err;
destroy_macvlan_port:
- if (create)
+	/* the macvlan port may be freed by macvlan_uninit when registration
+	 * fails, so only destroy the macvlan port when it's still valid.
+	 */
+ if (create && macvlan_port_get_rtnl(dev))
macvlan_port_destroy(port->dev);
return err;
}
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index c2715908d43e..411cf1072bae 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -230,7 +230,7 @@ static int at803x_suspend(struct phy_device *phydev)
static int at803x_resume(struct phy_device *phydev)
{
- return phy_modify(phydev, MII_BMCR, ~(BMCR_PDOWN | BMCR_ISOLATE), 0);
+ return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
static int at803x_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2bd38962b5d2..22d9bc9c33a4 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -96,6 +96,17 @@
#define MII_88E1510_TEMP_SENSOR 0x1b
#define MII_88E1510_TEMP_SENSOR_MASK 0xff
+#define MII_88E6390_MISC_TEST 0x1b
+#define MII_88E6390_MISC_TEST_SAMPLE_1S 0
+#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14)
+#define MII_88E6390_MISC_TEST_SAMPLE_DISABLE BIT(15)
+#define MII_88E6390_MISC_TEST_SAMPLE_ENABLE 0
+#define MII_88E6390_MISC_TEST_SAMPLE_MASK (0x3 << 14)
+
+#define MII_88E6390_TEMP_SENSOR 0x1c
+#define MII_88E6390_TEMP_SENSOR_MASK 0xff
+#define MII_88E6390_TEMP_SENSOR_SAMPLES 10
+
#define MII_88E1318S_PHY_MSCR1_REG 16
#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
@@ -668,7 +679,7 @@ static int m88e3016_config_init(struct phy_device *phydev)
/* Enable Scrambler and Auto-Crossover */
ret = phy_modify(phydev, MII_88E3016_PHY_SPEC_CTRL,
- ~MII_88E3016_DISABLE_SCRAMBLER,
+ MII_88E3016_DISABLE_SCRAMBLER,
MII_88E3016_AUTO_MDIX_CROSSOVER);
if (ret < 0)
return ret;
@@ -684,9 +695,9 @@ static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev,
mode |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
return phy_modify(phydev, MII_M1111_PHY_EXT_SR,
- (u16)~(MII_M1111_HWCFG_MODE_MASK |
- MII_M1111_HWCFG_FIBER_COPPER_AUTO |
- MII_M1111_HWCFG_FIBER_COPPER_RES),
+ MII_M1111_HWCFG_MODE_MASK |
+ MII_M1111_HWCFG_FIBER_COPPER_AUTO |
+ MII_M1111_HWCFG_FIBER_COPPER_RES,
mode);
}
@@ -705,8 +716,7 @@ static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev)
}
return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
- (u16)~(MII_M1111_RGMII_RX_DELAY |
- MII_M1111_RGMII_TX_DELAY),
+ MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY,
delay);
}
@@ -833,7 +843,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
/* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */
err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1,
- ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK,
+ MII_88E1510_GEN_CTRL_REG_1_MODE_MASK,
MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII);
if (err < 0)
return err;
@@ -957,7 +967,7 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_modify(phydev, 0x1e, 0xf03f,
+ err = phy_modify(phydev, 0x1e, 0x0fc0,
2 << 9 | /* 36 ohm */
2 << 6); /* 39 ohm */
if (err < 0)
@@ -1379,7 +1389,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
/* Setup LED[2] as interrupt pin (active low) */
err = __phy_modify(phydev, MII_88E1318S_PHY_LED_TCR,
- (u16)~MII_88E1318S_PHY_LED_TCR_FORCE_INT,
+ MII_88E1318S_PHY_LED_TCR_FORCE_INT,
MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
if (err < 0)
@@ -1419,7 +1429,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
/* Clear WOL status and disable magic packet matching */
err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL,
- (u16)~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE,
+ MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE,
MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
if (err < 0)
goto error;
@@ -1739,6 +1749,123 @@ static const struct hwmon_chip_info m88e1510_hwmon_chip_info = {
.info = m88e1510_hwmon_info,
};
+static int m88e6390_get_temp(struct phy_device *phydev, long *temp)
+{
+ int sum = 0;
+ int oldpage;
+ int ret = 0;
+ int i;
+
+ *temp = 0;
+
+ oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+ ret |= MII_88E6390_MISC_TEST_SAMPLE_ENABLE |
+ MII_88E6390_MISC_TEST_SAMPLE_1S;
+
+ ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+	/* Reading the temperature sensor is subject to an erratum:
+	 * read it a number of times and take the average.
+	 */
+ for (i = 0; i < MII_88E6390_TEMP_SENSOR_SAMPLES; i++) {
+ ret = __phy_read(phydev, MII_88E6390_TEMP_SENSOR);
+ if (ret < 0)
+ goto error;
+ sum += ret & MII_88E6390_TEMP_SENSOR_MASK;
+ }
+
+ sum /= MII_88E6390_TEMP_SENSOR_SAMPLES;
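+	/* The raw value is offset by 75 degrees C; hwmon wants millidegrees. */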
+ *temp = (sum - 75) * 1000;
+
+ /* Disable temperature sensor */
+ ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+ ret |= MII_88E6390_MISC_TEST_SAMPLE_DISABLE;
+
+ ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+
+error:
+ phy_restore_page(phydev, oldpage, ret);
+
+ return ret;
+}
+
+static int m88e6390_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = m88e6390_get_temp(phydev, temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static umode_t m88e6390_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static u32 m88e6390_hwmon_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info m88e6390_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = m88e6390_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e6390_hwmon_info[] = {
+ &m88e1121_hwmon_chip,
+ &m88e6390_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = {
+ .is_visible = m88e6390_hwmon_is_visible,
+ .read = m88e6390_hwmon_read,
+};
+
+static const struct hwmon_chip_info m88e6390_hwmon_chip_info = {
+ .ops = &m88e6390_hwmon_hwmon_ops,
+ .info = m88e6390_hwmon_info,
+};
+
static int marvell_hwmon_name(struct phy_device *phydev)
{
struct marvell_priv *priv = phydev->priv;
@@ -1785,6 +1912,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
{
return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info);
}
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+ return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info);
+}
#else
static int m88e1121_hwmon_probe(struct phy_device *phydev)
{
@@ -1795,6 +1927,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
{
return 0;
}
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
#endif
static int marvell_probe(struct phy_device *phydev)
@@ -1832,6 +1969,17 @@ static int m88e1510_probe(struct phy_device *phydev)
return m88e1510_hwmon_probe(phydev);
}
+static int m88e6390_probe(struct phy_device *phydev)
+{
+ int err;
+
+ err = marvell_probe(phydev);
+ if (err)
+ return err;
+
+ return m88e6390_hwmon_probe(phydev);
+}
+
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -2123,7 +2271,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E6390",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .probe = m88e1510_probe,
+ .probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
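
For reference, a minimal user-space sketch of the 88E6390 temperature conversion added above: the averaged raw sample is offset by 75 and scaled to millidegrees Celsius, the hwmon convention. The sample count and register mask below are assumptions standing in for MII_88E6390_TEMP_SENSOR_SAMPLES and MII_88E6390_TEMP_SENSOR_MASK; everything else is illustrative.

#include <stdio.h>

/* Assumed values; the helper itself is hypothetical. */
#define TEMP_SENSOR_SAMPLES 10
#define TEMP_SENSOR_MASK    0xff

static long m88e6390_raw_to_mcelsius(const int *raw, int n)
{
	int sum = 0, i;

	for (i = 0; i < n; i++)
		sum += raw[i] & TEMP_SENSOR_MASK;
	sum /= n;
	return (sum - 75) * 1000;	/* e.g. raw average 100 -> 25000 m°C */
}

int main(void)
{
	int samples[TEMP_SENSOR_SAMPLES] = { 100, 101, 99, 100, 100,
					     100, 101, 99, 100, 100 };

	printf("%ld\n", m88e6390_raw_to_mcelsius(samples, TEMP_SENSOR_SAMPLES));
	return 0;
}
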
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 08e0647b85e2..8d370667fa1b 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -205,6 +205,8 @@ static int unimac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EINVAL;
/* Just ioremap, as this MDIO block is usually integrated into an
* Ethernet MAC controller register range
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 135296508a7e..6425ce04d3f9 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
data->regulator = devm_regulator_get(&pdev->dev, "phy");
if (IS_ERR(data->regulator)) {
- if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_out_free_mdiobus;
+ }
dev_info(&pdev->dev, "no regulator found\n");
data->regulator = NULL;
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 44d09b192014..e75989ce8850 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -332,7 +332,7 @@ EXPORT_SYMBOL(phy_write_mmd);
* @set: bit mask of bits to set
*
* Unlocked helper function which allows a PHY register to be modified as
- * new register value = (old register value & mask) | set
+ * new register value = (old register value & ~mask) | set
*/
int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
{
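
The doc-comment correction above is the key to the earlier marvell.c and phy_device.c hunks: __phy_modify()/phy_modify() clear the bits in mask and then OR in set, so callers pass the positive bit mask, not its complement. A minimal user-space sketch of the convention (the BMCR bit values are the real mii.h ones; the modify() helper is illustrative):

#include <stdio.h>
#include <stdint.h>

#define BMCR_ISOLATE	0x0400
#define BMCR_ANENABLE	0x1000
#define BMCR_ANRESTART	0x0200

/* phy_modify() semantics: new = (old & ~mask) | set */
static uint16_t modify(uint16_t old, uint16_t mask, uint16_t set)
{
	return (old & ~mask) | set;
}

int main(void)
{
	uint16_t bmcr = 0x2100 | BMCR_ISOLATE;	/* speed/duplex bits set */

	/* Correct: positive mask, only ISOLATE is cleared -> 0x3300 */
	printf("0x%04x\n", modify(bmcr, BMCR_ISOLATE,
				  BMCR_ANENABLE | BMCR_ANRESTART));

	/* Buggy pre-fix pattern: ~BMCR_ISOLATE wipes every other bit -> 0x1600 */
	printf("0x%04x\n", modify(bmcr, (uint16_t)~BMCR_ISOLATE,
				  BMCR_ANENABLE | BMCR_ANRESTART));
	return 0;
}
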
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 2c5b2e041c0f..6bd11a070ec8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1393,7 +1393,7 @@ EXPORT_SYMBOL(genphy_setup_forced);
int genphy_restart_aneg(struct phy_device *phydev)
{
/* Don't isolate the PHY if we're negotiating */
- return phy_modify(phydev, MII_BMCR, ~BMCR_ISOLATE,
+ return phy_modify(phydev, MII_BMCR, BMCR_ISOLATE,
BMCR_ANENABLE | BMCR_ANRESTART);
}
EXPORT_SYMBOL(genphy_restart_aneg);
@@ -1666,13 +1666,13 @@ EXPORT_SYMBOL(genphy_suspend);
int genphy_resume(struct phy_device *phydev)
{
- return phy_modify(phydev, MII_BMCR, ~BMCR_PDOWN, 0);
+ return phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
}
EXPORT_SYMBOL(genphy_resume);
int genphy_loopback(struct phy_device *phydev, bool enable)
{
- return phy_modify(phydev, MII_BMCR, ~BMCR_LOOPBACK,
+ return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
enable ? BMCR_LOOPBACK : 0);
}
EXPORT_SYMBOL(genphy_loopback);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index d1f9466f2fbf..6ac8b29b2dc3 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1525,6 +1525,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = pl->phydev->mdio.addr;
+ /* fall through */
case SIOCGMIIREG:
ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1547,6 +1548,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = 0;
+ /* fall through */
case SIOCGMIIREG:
ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@@ -1659,9 +1661,8 @@ static void phylink_sfp_link_down(void *upstream)
ASSERT_RTNL();
set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
+ queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
-
- netif_carrier_off(pl->netdev);
}
static void phylink_sfp_link_up(void *upstream)
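
The link-down path above now queues the resolve work and immediately flushes it, guaranteeing the resolver runs (and drops the carrier itself) before the function returns; flush_work() alone does nothing if the work was never queued. A minimal module-style sketch of the queue-then-flush pattern, assuming a hypothetical handler:

#include <linux/module.h>
#include <linux/workqueue.h>

static void resolve_fn(struct work_struct *work)
{
	pr_info("resolve ran\n");	/* stand-in for phylink's resolver */
}
static DECLARE_WORK(resolve_work, resolve_fn);

static int __init demo_init(void)
{
	/* Queue first, then flush: the handler is guaranteed to have
	 * completed by the time flush_work() returns.
	 */
	queue_work(system_power_efficient_wq, &resolve_work);
	flush_work(&resolve_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&resolve_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
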
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 3ecc378e0716..bdc4bb3c8288 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -489,7 +489,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);
void sfp_unregister_upstream(struct sfp_bus *bus)
{
rtnl_lock();
- sfp_unregister_bus(bus);
+ if (bus->sfp)
+ sfp_unregister_bus(bus);
bus->upstream = NULL;
bus->netdev = NULL;
rtnl_unlock();
@@ -592,7 +593,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
void sfp_unregister_socket(struct sfp_bus *bus)
{
rtnl_lock();
- sfp_unregister_bus(bus);
+ if (bus->netdev)
+ sfp_unregister_bus(bus);
bus->sfp_dev = NULL;
bus->sfp = NULL;
bus->socket_ops = NULL;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 0a886fda0129..7c38659b2a76 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -330,7 +330,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
if (!q)
return RX_HANDLER_PASS;
- if (__skb_array_full(&q->skb_array))
+ if (__ptr_ring_full(&q->ring))
goto drop;
skb_push(skb, ETH_HLEN);
@@ -348,7 +348,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
goto drop;
if (!segs) {
- if (skb_array_produce(&q->skb_array, skb))
+ if (ptr_ring_produce(&q->ring, skb))
goto drop;
goto wake_up;
}
@@ -358,7 +358,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct sk_buff *nskb = segs->next;
segs->next = NULL;
- if (skb_array_produce(&q->skb_array, segs)) {
+ if (ptr_ring_produce(&q->ring, segs)) {
kfree_skb(segs);
kfree_skb_list(nskb);
break;
@@ -375,7 +375,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
!(features & NETIF_F_CSUM_MASK) &&
skb_checksum_help(skb))
goto drop;
- if (skb_array_produce(&q->skb_array, skb))
+ if (ptr_ring_produce(&q->ring, skb))
goto drop;
}
@@ -497,7 +497,7 @@ static void tap_sock_destruct(struct sock *sk)
{
struct tap_queue *q = container_of(sk, struct tap_queue, sk);
- skb_array_cleanup(&q->skb_array);
+ ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}
static int tap_open(struct inode *inode, struct file *file)
@@ -517,7 +517,7 @@ static int tap_open(struct inode *inode, struct file *file)
&tap_proto, 0);
if (!q)
goto err;
- if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+ if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
sk_free(&q->sk);
goto err;
}
@@ -546,7 +546,7 @@ static int tap_open(struct inode *inode, struct file *file)
err = tap_set_queue(tap, file, q);
if (err) {
- /* tap_sock_destruct() will take care of freeing skb_array */
+ /* tap_sock_destruct() will take care of freeing ptr_ring */
goto err_put;
}
@@ -583,7 +583,7 @@ static unsigned int tap_poll(struct file *file, poll_table *wait)
mask = 0;
poll_wait(file, &q->wq.wait, wait);
- if (!skb_array_empty(&q->skb_array))
+ if (!ptr_ring_empty(&q->ring))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(&q->sk) ||
@@ -844,7 +844,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
TASK_INTERRUPTIBLE);
/* Read frames from the queue */
- skb = skb_array_consume(&q->skb_array);
+ skb = ptr_ring_consume(&q->ring);
if (skb)
break;
if (noblock) {
@@ -1176,7 +1176,7 @@ static int tap_peek_len(struct socket *sock)
{
struct tap_queue *q = container_of(sock, struct tap_queue,
sock);
- return skb_array_peek_len(&q->skb_array);
+ return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}
/* Ops structure to mimic raw sockets with tun */
@@ -1202,7 +1202,7 @@ struct socket *tap_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tap_get_socket);
-struct skb_array *tap_get_skb_array(struct file *file)
+struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
struct tap_queue *q;
@@ -1211,29 +1211,30 @@ struct skb_array *tap_get_skb_array(struct file *file)
q = file->private_data;
if (!q)
return ERR_PTR(-EBADFD);
- return &q->skb_array;
+ return &q->ring;
}
-EXPORT_SYMBOL_GPL(tap_get_skb_array);
+EXPORT_SYMBOL_GPL(tap_get_ptr_ring);
int tap_queue_resize(struct tap_dev *tap)
{
struct net_device *dev = tap->dev;
struct tap_queue *q;
- struct skb_array **arrays;
+ struct ptr_ring **rings;
int n = tap->numqueues;
int ret, i = 0;
- arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
- if (!arrays)
+ rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
return -ENOMEM;
list_for_each_entry(q, &tap->queue_list, next)
- arrays[i++] = &q->skb_array;
+ rings[i++] = &q->ring;
- ret = skb_array_resize_multiple(arrays, n,
- dev->tx_queue_len, GFP_KERNEL);
+ ret = ptr_ring_resize_multiple(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);
- kfree(arrays);
+ kfree(rings);
return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);
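
The tap.c conversion replaces the skb-typed skb_array wrappers with the underlying void-pointer ptr_ring, so the same ring can later carry XDP buffers alongside sk_buffs (see the tun.c hunks below). A hedged module-style sketch of the ptr_ring calls involved; demo_destroy() is a stand-in for __skb_array_destroy_skb():

#include <linux/module.h>
#include <linux/ptr_ring.h>
#include <linux/slab.h>

static struct ptr_ring ring;

static void demo_destroy(void *ptr)
{
	kfree(ptr);
}

static int __init demo_init(void)
{
	void *item;

	if (ptr_ring_init(&ring, 64, GFP_KERNEL))
		return -ENOMEM;

	item = kmalloc(16, GFP_KERNEL);
	if (item && ptr_ring_produce(&ring, item))	/* nonzero: ring full */
		kfree(item);

	kfree(ptr_ring_consume(&ring));			/* NULL when empty */
	return 0;
}

static void __exit demo_exit(void)
{
	/* The destructor runs for anything still queued. */
	ptr_ring_cleanup(&ring, demo_destroy);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
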
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e7c5f4b2a9a6..2fba3be5719e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -179,7 +179,7 @@ struct tun_file {
struct mutex napi_mutex; /* Protects access to the above napi */
struct list_head next;
struct tun_struct *detached;
- struct skb_array tx_array;
+ struct ptr_ring tx_ring;
struct xdp_rxq_info xdp_rxq;
};
@@ -241,6 +241,24 @@ struct tun_struct {
struct tun_steering_prog __rcu *steering_prog;
};
+bool tun_is_xdp_buff(void *ptr)
+{
+ return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+EXPORT_SYMBOL(tun_is_xdp_buff);
+
+void *tun_xdp_to_ptr(void *ptr)
+{
+ return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_xdp_to_ptr);
+
+void *tun_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_ptr_to_xdp);
+
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -631,12 +649,25 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
return tun;
}
+static void tun_ptr_free(void *ptr)
+{
+ if (!ptr)
+ return;
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ put_page(virt_to_head_page(xdp->data));
+ } else {
+ __skb_array_destroy_skb(ptr);
+ }
+}
+
static void tun_queue_purge(struct tun_file *tfile)
{
- struct sk_buff *skb;
+ void *ptr;
- while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
- kfree_skb(skb);
+ while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
+ tun_ptr_free(ptr);
skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
@@ -689,7 +720,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
unregister_netdevice(tun->dev);
}
if (tun) {
- skb_array_cleanup(&tfile->tx_array);
+ ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
xdp_rxq_info_unreg(&tfile->xdp_rxq);
}
sock_put(&tfile->sk);
@@ -782,7 +813,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
}
if (!tfile->detached &&
- skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
+ ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
err = -ENOMEM;
goto out;
}
@@ -1048,7 +1079,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset(skb);
- if (skb_array_produce(&tfile->tx_array, skb))
+ if (ptr_ring_produce(&tfile->tx_ring, skb))
goto drop;
/* Notify and wake up reader process */
@@ -1221,6 +1252,67 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_get_stats64 = tun_net_get_stats64,
};
+static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct xdp_buff *buff = xdp->data_hard_start;
+ int headroom = xdp->data - xdp->data_hard_start;
+ struct tun_file *tfile;
+ u32 numqueues;
+ int ret = 0;
+
+ /* Ensure headroom is available and buff is properly aligned */
+ if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
+ return -ENOSPC;
+
+ *buff = *xdp;
+
+ rcu_read_lock();
+
+ numqueues = READ_ONCE(tun->numqueues);
+ if (!numqueues) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+ numqueues]);
+ /* Encode the XDP flag into the lowest bit so the consumer can
+ * distinguish an XDP buffer from an sk_buff.
+ */
+ if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
+ this_cpu_inc(tun->pcpu_stats->tx_dropped);
+ ret = -ENOSPC;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static void tun_xdp_flush(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_file *tfile;
+ u32 numqueues;
+
+ rcu_read_lock();
+
+ numqueues = READ_ONCE(tun->numqueues);
+ if (!numqueues)
+ goto out;
+
+ tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+ numqueues]);
+ /* Notify and wake up reader process */
+ if (tfile->flags & TUN_FASYNC)
+ kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+
+out:
+ rcu_read_unlock();
+}
+
static const struct net_device_ops tap_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
@@ -1238,6 +1330,8 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
.ndo_bpf = tun_xdp,
+ .ndo_xdp_xmit = tun_xdp_xmit,
+ .ndo_xdp_flush = tun_xdp_flush,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1316,7 +1410,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
poll_wait(file, sk_sleep(sk), wait);
- if (!skb_array_empty(&tfile->tx_array))
+ if (!ptr_ring_empty(&tfile->tx_ring))
mask |= POLLIN | POLLRDNORM;
if (tun->dev->flags & IFF_UP &&
@@ -1862,6 +1956,40 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
return result;
}
+static ssize_t tun_put_user_xdp(struct tun_struct *tun,
+ struct tun_file *tfile,
+ struct xdp_buff *xdp,
+ struct iov_iter *iter)
+{
+ int vnet_hdr_sz = 0;
+ size_t size = xdp->data_end - xdp->data;
+ struct tun_pcpu_stats *stats;
+ size_t ret;
+
+ if (tun->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr gso = { 0 };
+
+ vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+ if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
+ return -EINVAL;
+ if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
+ sizeof(gso)))
+ return -EFAULT;
+ iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ }
+
+ ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
+
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += ret;
+ u64_stats_update_end(&stats->syncp);
+ put_cpu_ptr(tun->pcpu_stats);
+
+ return ret;
+}
+
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
struct tun_file *tfile,
@@ -1959,15 +2087,14 @@ done:
return total;
}
-static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
- int *err)
+static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
DECLARE_WAITQUEUE(wait, current);
- struct sk_buff *skb = NULL;
+ void *ptr = NULL;
int error = 0;
- skb = skb_array_consume(&tfile->tx_array);
- if (skb)
+ ptr = ptr_ring_consume(&tfile->tx_ring);
+ if (ptr)
goto out;
if (noblock) {
error = -EAGAIN;
@@ -1978,8 +2105,8 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
current->state = TASK_INTERRUPTIBLE;
while (1) {
- skb = skb_array_consume(&tfile->tx_array);
- if (skb)
+ ptr = ptr_ring_consume(&tfile->tx_ring);
+ if (ptr)
break;
if (signal_pending(current)) {
error = -ERESTARTSYS;
@@ -1998,12 +2125,12 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
out:
*err = error;
- return skb;
+ return ptr;
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct iov_iter *to,
- int noblock, struct sk_buff *skb)
+ int noblock, void *ptr)
{
ssize_t ret;
int err;
@@ -2011,23 +2138,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
tun_debug(KERN_INFO, tun, "tun_do_read\n");
if (!iov_iter_count(to)) {
- if (skb)
- kfree_skb(skb);
+ tun_ptr_free(ptr);
return 0;
}
- if (!skb) {
+ if (!ptr) {
/* Read frames from ring */
- skb = tun_ring_recv(tfile, noblock, &err);
- if (!skb)
+ ptr = tun_ring_recv(tfile, noblock, &err);
+ if (!ptr)
return err;
}
- ret = tun_put_user(tun, tfile, skb, to);
- if (unlikely(ret < 0))
- kfree_skb(skb);
- else
- consume_skb(skb);
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ ret = tun_put_user_xdp(tun, tfile, xdp, to);
+ put_page(virt_to_head_page(xdp->data));
+ } else {
+ struct sk_buff *skb = ptr;
+
+ ret = tun_put_user(tun, tfile, skb, to);
+ if (unlikely(ret < 0))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
+ }
return ret;
}
@@ -2164,12 +2299,12 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
struct tun_struct *tun = tun_get(tfile);
- struct sk_buff *skb = m->msg_control;
+ void *ptr = m->msg_control;
int ret;
if (!tun) {
ret = -EBADFD;
- goto out_free_skb;
+ goto out_free;
}
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
@@ -2181,7 +2316,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
+ ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2192,12 +2327,25 @@ out:
out_put_tun:
tun_put(tun);
-out_free_skb:
- if (skb)
- kfree_skb(skb);
+out_free:
+ tun_ptr_free(ptr);
return ret;
}
+static int tun_ptr_peek_len(void *ptr)
+{
+ if (likely(ptr)) {
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ return xdp->data_end - xdp->data;
+ }
+ return __skb_array_len_with_tag(ptr);
+ } else {
+ return 0;
+ }
+}
+
static int tun_peek_len(struct socket *sock)
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -2208,7 +2356,7 @@ static int tun_peek_len(struct socket *sock)
if (!tun)
return 0;
- ret = skb_array_peek_len(&tfile->tx_array);
+ ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
tun_put(tun);
return ret;
@@ -3114,25 +3262,26 @@ static int tun_queue_resize(struct tun_struct *tun)
{
struct net_device *dev = tun->dev;
struct tun_file *tfile;
- struct skb_array **arrays;
+ struct ptr_ring **rings;
int n = tun->numqueues + tun->numdisabled;
int ret, i;
- arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
- if (!arrays)
+ rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
return -ENOMEM;
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- arrays[i] = &tfile->tx_array;
+ rings[i] = &tfile->tx_ring;
}
list_for_each_entry(tfile, &tun->disabled, next)
- arrays[i++] = &tfile->tx_array;
+ rings[i++] = &tfile->tx_ring;
- ret = skb_array_resize_multiple(arrays, n,
- dev->tx_queue_len, GFP_KERNEL);
+ ret = ptr_ring_resize_multiple(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ tun_ptr_free);
- kfree(arrays);
+ kfree(rings);
return ret;
}
@@ -3218,7 +3367,7 @@ struct socket *tun_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tun_get_socket);
-struct skb_array *tun_get_skb_array(struct file *file)
+struct ptr_ring *tun_get_tx_ring(struct file *file)
{
struct tun_file *tfile;
@@ -3227,9 +3376,9 @@ struct skb_array *tun_get_skb_array(struct file *file)
tfile = file->private_data;
if (!tfile)
return ERR_PTR(-EBADFD);
- return &tfile->tx_array;
+ return &tfile->tx_ring;
}
-EXPORT_SYMBOL_GPL(tun_get_skb_array);
+EXPORT_SYMBOL_GPL(tun_get_tx_ring);
module_init(tun_init);
module_exit(tun_cleanup);
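
tun's ring now carries two payload types, disambiguated by TUN_XDP_FLAG in the pointer's low bit; this works because both sk_buffs and XDP buffers are at least word-aligned, leaving that bit free. A user-space sketch of the same tagging scheme, with hypothetical names:

#include <stdio.h>
#include <stdint.h>

#define XDP_FLAG 0x1UL	/* stand-in for TUN_XDP_FLAG */

static int is_xdp(void *ptr)   { return ((uintptr_t)ptr & XDP_FLAG) != 0; }
static void *tag_xdp(void *p)  { return (void *)((uintptr_t)p | XDP_FLAG); }
static void *untag(void *p)    { return (void *)((uintptr_t)p & ~XDP_FLAG); }

int main(void)
{
	long buf;			/* aligned, so bit 0 is unused */
	void *tagged = tag_xdp(&buf);

	printf("tagged as xdp: %d\n", is_xdp(tagged));			/* 1 */
	printf("round-trips:   %d\n", untag(tagged) == (void *)&buf);	/* 1 */
	return 0;
}
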
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cfaa07f230e5..ae0580b577b8 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ed8299343728..12dfc5fee58e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1906,6 +1906,24 @@ static void virtnet_init_settings(struct net_device *dev)
vi->duplex = DUPLEX_UNKNOWN;
}
+static void virtnet_update_settings(struct virtnet_info *vi)
+{
+ u32 speed;
+ u8 duplex;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
+ return;
+
+ speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
+ speed));
+ if (ethtool_validate_speed(speed))
+ vi->speed = speed;
+ duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
+ duplex));
+ if (ethtool_validate_duplex(duplex))
+ vi->duplex = duplex;
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -2159,6 +2177,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
vi->status = v;
if (vi->status & VIRTIO_NET_S_LINK_UP) {
+ virtnet_update_settings(vi);
netif_carrier_on(vi->dev);
netif_tx_wake_all_queues(vi->dev);
} else {
@@ -2707,6 +2726,7 @@ static int virtnet_probe(struct virtio_device *vdev)
schedule_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
+ virtnet_update_settings(vi);
netif_carrier_on(dev);
}
@@ -2808,7 +2828,8 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
VIRTIO_NET_F_CTRL_MAC_ADDR, \
- VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
+ VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
+ VIRTIO_NET_F_SPEED_DUPLEX
static unsigned int features[] = {
VIRTNET_FEATURES,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c5a34671abda..9bd7ddeeb6a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
+ xenbus_switch_state(dev, XenbusStateInitialising);
return netdev;
exit:
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index a346b4923550..41d3a3c1104e 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -156,8 +156,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
- for (i = offset; i < offset + bytes; i += efuse->config.word_size) {
- addr = i / efuse->config.word_size;
+ for (i = 0; i < bytes; i += efuse->config.word_size) {
+ addr = (offset + i) / efuse->config.word_size;
err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
if (err)
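
The meson-mx-efuse fix makes i a 0-based cursor into the output buffer while the word address is derived from offset + i; for example, a read of 8 bytes at offset 4 with a 4-byte word size now touches word addresses 1 and 2 while filling buf[0..7]. A tiny sketch of the corrected address math:

#include <stdio.h>

int main(void)
{
	unsigned int offset = 4, bytes = 8, word_size = 4, i;

	for (i = 0; i < bytes; i += word_size)
		printf("buf[%u..%u] <- word addr %u\n",
		       i, i + word_size - 1, (offset + i) / word_size);
	return 0;	/* prints word addresses 1 and 2 */
}
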
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 0b3fb99d9b89..7390fb8ca9d1 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -303,7 +303,7 @@ static void dino_mask_irq(struct irq_data *d)
struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+ DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
/* Clear the matching bit in the IMR register */
dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
@@ -316,7 +316,7 @@ static void dino_unmask_irq(struct irq_data *d)
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+ DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
/*
** clear pending IRQ bits
@@ -396,7 +396,7 @@ ilr_again:
if (mask) {
if (--ilr_loop > 0)
goto ilr_again;
- printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n",
+ printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
dino_dev->hba.base_addr, mask);
return IRQ_NONE;
}
@@ -553,7 +553,7 @@ dino_fixup_bus(struct pci_bus *bus)
struct pci_dev *dev;
struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
- DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
+ DBG(KERN_WARNING "%s(0x%px) bus %d platform_data 0x%px\n",
__func__, bus, bus->busn_res.start,
bus->bridge->platform_data);
@@ -854,7 +854,7 @@ static int __init dino_common_init(struct parisc_device *dev,
res->flags = IORESOURCE_IO; /* do not mark it busy ! */
if (request_resource(&ioport_resource, res) < 0) {
printk(KERN_ERR "%s: request I/O Port region failed "
- "0x%lx/%lx (hpa 0x%p)\n",
+ "0x%lx/%lx (hpa 0x%px)\n",
name, (unsigned long)res->start, (unsigned long)res->end,
dino_dev->hba.base_addr);
return 1;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 4dd9b1308128..99a80da6fd2e 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -106,7 +106,7 @@ static int __init eisa_eeprom_init(void)
return retval;
}
- printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr);
+ printk(KERN_INFO "EISA EEPROM at 0x%px\n", eisa_eeprom_addr);
return 0;
}
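
Both parisc hunks switch %p to %px: since the printk pointer-hashing hardening, plain %p prints an obfuscated value to avoid leaking kernel addresses, and %px must be used deliberately where the raw address is wanted, as in these debug paths. A minimal module-style sketch:

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	int local;

	pr_info("hashed: %p\n", &local);	/* obfuscated by default */
	pr_info("raw:    %px\n", &local);	/* prints the real address */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
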
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 04dac6a42c9f..6b8d060d07de 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -985,9 +985,7 @@ static u32 hv_compose_msi_req_v1(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode =
- (apic->irq_delivery_mode == dest_LowestPrio) ?
- dest_LowestPrio : dest_Fixed;
+ int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
* Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1008,9 +1006,7 @@ static u32 hv_compose_msi_req_v2(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode =
- (apic->irq_delivery_mode == dest_LowestPrio) ?
- dest_LowestPrio : dest_Fixed;
+ int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
* Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index accaaaccb662..6601ad0dfb3a 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
int irq, error;
irq = platform_get_irq_byname(pdev, name);
- if (!irq)
+ if (irq < 0)
return -ENODEV;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index cb09245e9b4c..c845facacb06 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -12,7 +12,9 @@ config PHY_RCAR_GEN3_USB2
tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
depends on ARCH_RENESAS
depends on EXTCON
+ depends on USB_SUPPORT
select GENERIC_PHY
+ select USB_COMMON
help
Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs.
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index ee85fa0ca4b0..7492c8978217 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1137,6 +1137,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy: %s\n",
child_np->name);
+ pm_runtime_disable(dev);
return PTR_ERR(phy);
}
@@ -1146,6 +1147,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
dev_err(dev, "Failed to register phy provider\n");
+ pm_runtime_disable(dev);
return PTR_ERR(phy_provider);
}
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 4307bf0013e1..63e916d4d069 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
static struct device_node *
tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
{
- /*
- * of_find_node_by_name() drops a reference, so make sure to grab one.
- */
- struct device_node *np = of_node_get(padctl->dev->of_node);
+ struct device_node *pads, *np;
+
+ pads = of_get_child_by_name(padctl->dev->of_node, "pads");
+ if (!pads)
+ return NULL;
- np = of_find_node_by_name(np, "pads");
- if (np)
- np = of_find_node_by_name(np, name);
+ np = of_get_child_by_name(pads, name);
+ of_node_put(pads);
return np;
}
@@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
static struct device_node *
tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
{
- /*
- * of_find_node_by_name() drops a reference, so make sure to grab one.
- */
- struct device_node *np = of_node_get(pad->dev.of_node);
+ struct device_node *np, *lanes;
- np = of_find_node_by_name(np, "lanes");
- if (!np)
+ lanes = of_get_child_by_name(pad->dev.of_node, "lanes");
+ if (!lanes)
return NULL;
- return of_find_node_by_name(np, pad->soc->lanes[index].name);
+ np = of_get_child_by_name(lanes, pad->soc->lanes[index].name);
+ of_node_put(lanes);
+
+ return np;
}
static int
@@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
unsigned int i;
int err;
- children = of_find_node_by_name(pad->dev.of_node, "lanes");
+ children = of_get_child_by_name(pad->dev.of_node, "lanes");
if (!children)
return -ENODEV;
@@ -444,21 +444,21 @@ static struct device_node *
tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
unsigned int index)
{
- /*
- * of_find_node_by_name() drops a reference, so make sure to grab one.
- */
- struct device_node *np = of_node_get(padctl->dev->of_node);
+ struct device_node *ports, *np;
+ char *name;
- np = of_find_node_by_name(np, "ports");
- if (np) {
- char *name;
+ ports = of_get_child_by_name(padctl->dev->of_node, "ports");
+ if (!ports)
+ return NULL;
- name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
- if (!name)
- return ERR_PTR(-ENOMEM);
- np = of_find_node_by_name(np, name);
- kfree(name);
+ name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+ if (!name) {
+ of_node_put(ports);
+ return ERR_PTR(-ENOMEM);
}
+ np = of_get_child_by_name(ports, name);
+ kfree(name);
+ of_node_put(ports);
return np;
}
@@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
static int tegra_xusb_padctl_probe(struct platform_device *pdev)
{
- struct device_node *np = of_node_get(pdev->dev.of_node);
+ struct device_node *np = pdev->dev.of_node;
const struct tegra_xusb_padctl_soc *soc;
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
@@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
int err;
/* for backwards compatibility with old device trees */
- np = of_find_node_by_name(np, "pads");
+ np = of_get_child_by_name(np, "pads");
if (!np) {
dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n");
return tegra_xusb_padctl_legacy_probe(pdev);
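
The tegra xusb hunks replace of_find_node_by_name(), which searches depth-first from the given node and consumes its reference, with of_get_child_by_name(), which matches only direct children and does not touch the starting node's refcount; the intermediate node is then dropped explicitly. A hedged sketch of the lookup pattern, modeled on tegra_xusb_find_pad_node() above (find_pad() is a hypothetical helper):

#include <linux/of.h>

/* Find <parent>/pads/<name>, returning it with a reference held. */
static struct device_node *find_pad(struct device_node *parent,
				    const char *name)
{
	struct device_node *pads, *np;

	pads = of_get_child_by_name(parent, "pads");
	if (!pads)
		return NULL;

	np = of_get_child_by_name(pads, name);
	of_node_put(pads);	/* drop the intermediate reference */

	return np;		/* caller must of_node_put(np) when done */
}
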
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index e6cd8de793e2..3501491e5bfc 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -222,6 +222,9 @@ static enum pin_config_param pcs_bias[] = {
*/
static struct lock_class_key pcs_lock_class;
+/* Class for the IRQ request mutex */
+static struct lock_class_key pcs_request_class;
+
/*
* REVISIT: Reads and writes could eventually use regmap or something
* generic. But at least on omaps, some mux registers are performance
@@ -1486,7 +1489,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, pcs_soc);
irq_set_chip_and_handler(irq, &pcs->chip,
handle_level_irq);
- irq_set_lockdep_class(irq, &pcs_lock_class);
+ irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class);
irq_set_noprobe(irq);
return 0;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index a276c61be217..e62ab087bfd8 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -290,7 +290,7 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
}
static int stm32_gpio_domain_activate(struct irq_domain *d,
- struct irq_data *irq_data, bool early)
+ struct irq_data *irq_data, bool reserve)
{
struct stm32_gpio_bank *bank = d->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 791449a2370f..daa68acbc900 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void)
class_unregister(&wmi_bus_class);
}
-subsys_initcall(acpi_wmi_init);
+subsys_initcall_sync(acpi_wmi_init);
module_exit(acpi_wmi_exit);
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c94b606e0df8..ee14d8e45c97 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
}
+
+ /*
+ * For path verification work we need to stick with the path that was
+ * originally chosen so that the per path configuration data is
+ * assigned correctly.
+ */
+ if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
+ erp->lpm = cqr->lpm;
+ }
+
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
dev_err(&device->cdev->dev,
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 05ac6ba15a53..614b44e70a28 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp_early_core.o += -march=z900
endif
+CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o sclp_early_core.o
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index a9996c16f4ae..26ce17178401 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1415,7 +1415,10 @@ static void __scsi_remove_target(struct scsi_target *starget)
* check.
*/
if (sdev->channel != starget->channel ||
- sdev->id != starget->id ||
+ sdev->id != starget->id)
+ continue;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL ||
!get_device(&sdev->sdev_gendev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 1b06cf0375dc..3b3d1d050cac 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -953,10 +953,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case TEST_UNIT_READY:
break;
default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_ERROR);
}
break;
case SRB_STATUS_INVALID_LUN:
+ set_host_byte(scmnd, DID_NO_CONNECT);
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index a517b2d29f1b..8f6494158d3d 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -37,7 +37,7 @@ config ION_CHUNK_HEAP
config ION_CMA_HEAP
bool "Ion CMA heap support"
- depends on ION && CMA
+ depends on ION && DMA_CMA
help
Choose this option to enable CMA heaps with Ion. This heap is backed
by the Contiguous Memory Allocator (CMA). If your system has these
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a7d9b0e98572..f480885e346b 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -346,7 +346,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
- DMA_BIDIRECTIONAL);
+ direction);
}
mutex_unlock(&buffer->lock);
@@ -368,7 +368,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
- DMA_BIDIRECTIONAL);
+ direction);
}
mutex_unlock(&buffer->lock);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index dd5545d9990a..86196ffd2faf 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
struct sg_table *table;
struct page *pages;
+ unsigned long size = PAGE_ALIGN(len);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned long align = get_order(size);
int ret;
- pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL);
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
if (!pages)
return -ENOMEM;
@@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (ret)
goto free_mem;
- sg_set_page(table->sgl, pages, len, 0);
+ sg_set_page(table->sgl, pages, size, 0);
buffer->priv_virt = pages;
buffer->sg_table = table;
@@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
free_mem:
kfree(table);
err:
- cma_release(cma_heap->cma, pages, buffer->size);
+ cma_release(cma_heap->cma, pages, nr_pages);
return -ENOMEM;
}
@@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer)
{
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
struct page *pages = buffer->priv_virt;
+ unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
/* release memory */
- cma_release(cma_heap->cma, pages, buffer->size);
+ cma_release(cma_heap->cma, pages, nr_pages);
/* release sg table */
sg_free_table(buffer->sg_table);
kfree(buffer->sg_table);
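
The ion_cma_heap fix converts the byte length into what cma_alloc()/cma_release() actually expect: a page count, plus an alignment expressed as a page order clamped to CONFIG_CMA_ALIGNMENT. A user-space sketch of the conversion, assuming 4 KiB pages and an alignment cap of 8:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define CMA_ALIGNMENT	8	/* assumed CONFIG_CMA_ALIGNMENT */

/* Same result as the kernel's get_order() for sizes >= 1. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long len = 3 * PAGE_SIZE + 100;	/* arbitrary request */
	unsigned long size = PAGE_ALIGN(len);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int align = get_order(size);

	if (align > CMA_ALIGNMENT)
		align = CMA_ALIGNMENT;

	/* pages=4 align_order=2 */
	printf("pages=%lu align_order=%u\n", nr_pages, align);
	return 0;
}
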
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 986c2a40d978..8267119ccc8e 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -487,21 +487,18 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
ksocknal_nid2peerlist(id.nid));
}
- route2 = NULL;
list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
- if (route2->ksnr_ipaddr == ipaddr)
- break;
-
- route2 = NULL;
- }
- if (!route2) {
- ksocknal_add_route_locked(peer, route);
- route->ksnr_share_count++;
- } else {
- ksocknal_route_decref(route);
- route2->ksnr_share_count++;
+ if (route2->ksnr_ipaddr == ipaddr) {
+ /* Route already exists, use the old one */
+ ksocknal_route_decref(route);
+ route2->ksnr_share_count++;
+ goto out;
+ }
}
-
+ /* Route doesn't already exist, add the new one */
+ ksocknal_add_route_locked(peer, route);
+ route->ksnr_share_count++;
+out:
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return 0;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 419a7a90bce0..f45bcbc63738 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -339,7 +339,7 @@ static void __ring_interrupt(struct tb_ring *ring)
return;
if (ring->start_poll) {
- __ring_interrupt_mask(ring, false);
+ __ring_interrupt_mask(ring, true);
ring->start_poll(ring->poll_data);
} else {
schedule_work(&ring->work);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 427e0d5d8f13..539b49adb6af 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1762,7 +1762,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
- if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
+ if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->line_start = ldata->read_tail;
if (!L_ICANON(tty) || !read_cnt(ldata)) {
@@ -2425,7 +2425,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
case TIOCINQ:
down_write(&tty->termios_rwsem);
- if (L_ICANON(tty))
+ if (L_ICANON(tty) && !L_EXTPROC(tty))
retval = inq_canon(ldata);
else
retval = read_cnt(ldata);
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index 3593ce0ec641..880009987460 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -247,7 +247,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
if (ret)
goto err_mux;
- ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi");
+ ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi");
if (ulpi_node) {
phy_node = of_get_next_available_child(ulpi_node, NULL);
ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy");
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 78e92d29f8d9..c821b4b9647e 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1007,7 +1007,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
case USB_SSP_CAP_TYPE:
ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
- USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+ USB_SSP_SUBLINK_SPEED_ATTRIBS);
if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
dev->bos->ssp_cap = ssp_cap;
break;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a10b346b9777..4024926c1d68 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -52,10 +52,11 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920, C920-C and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -149,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+ /* ELSA MicroLink 56K */
+ { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 4f7895dbcf88..e26e685d8a57 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -162,7 +162,7 @@ static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
{
dma_addr_t dma;
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
seq_printf(s, "%pad\n", &dma);
@@ -173,7 +173,7 @@ static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
{
dma_addr_t dma;
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
seq_printf(s, "%pad\n", &dma);
@@ -183,7 +183,7 @@ static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
{
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
seq_printf(s, "%d\n", ring->cycle_state);
@@ -346,7 +346,7 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
}
static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
- struct xhci_ring *ring,
+ struct xhci_ring **ring,
const char *name,
struct dentry *parent)
{
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
epriv->root = xhci_debugfs_create_ring_dir(xhci,
- dev->eps[ep_index].new_ring,
+ &dev->eps[ep_index].new_ring,
epriv->name,
spriv->root);
spriv->eps[ep_index] = epriv;
@@ -423,7 +423,7 @@ void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
priv->dev = dev;
dev->debugfs_private = priv;
- xhci_debugfs_create_ring_dir(xhci, dev->eps[0].ring,
+ xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring,
"ep00", priv->root);
xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
@@ -488,11 +488,11 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
ARRAY_SIZE(xhci_extcap_dbc),
"reg-ext-dbc");
- xhci_debugfs_create_ring_dir(xhci, xhci->cmd_ring,
+ xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring,
"command-ring",
xhci->debugfs_root);
- xhci_debugfs_create_ring_dir(xhci, xhci->event_ring,
+ xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
"event-ring",
xhci->debugfs_root);
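
The xhci-debugfs fix stores a pointer to the ring pointer in s->private and dereferences it at show time, so the files stay valid after a ring is reallocated; caching the ring pointer itself left them reading freed memory. A user-space sketch of the indirection, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct ring { int cycle_state; };

struct dev {
	struct ring *ring;	/* may be swapped out at runtime */
};

/* Registered with &dev->ring, not dev->ring. */
static void show_cycle(void *priv)
{
	struct ring *ring = *(struct ring **)priv;

	printf("%d\n", ring->cycle_state);
}

int main(void)
{
	struct dev d = { .ring = malloc(sizeof(*d.ring)) };

	d.ring->cycle_state = 1;
	show_cycle(&d.ring);		/* 1 */

	free(d.ring);			/* ring reallocated... */
	d.ring = malloc(sizeof(*d.ring));
	d.ring->cycle_state = 0;
	show_cycle(&d.ring);		/* ...and still read correctly: 0 */

	free(d.ring);
	return 0;
}
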
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7ef1274ef7f7..1aad89b8aba0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -178,6 +178,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0014)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0015)
xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2424d3020ca3..da6dbe3ebd8b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3525,8 +3525,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_slot_ctx *slot_ctx;
int i, ret;
- xhci_debugfs_remove_slot(xhci, udev->slot_id);
-
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* We called pm_runtime_get_noresume when the device was attached.
@@ -3555,8 +3553,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
ret = xhci_disable_slot(xhci, udev->slot_id);
- if (ret)
+ if (ret) {
+ xhci_debugfs_remove_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
+ }
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1aba9105b369..fc68952c994a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1013,6 +1013,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+ { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4faa09fe308c..8b4ecd2bd297 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -915,6 +915,12 @@
#define ICPDAS_I7563U_PID 0x0105
/*
+ * Airbus Defence and Space
+ */
+#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */
+#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */
+
+/*
* RT Systems programming cables for various ham radios
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3b3513874cfd..b6320e3be429 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
+/* These Yuga products use Qualcomm's vendor ID */
+#define YUGA_PRODUCT_CLM920_NC5 0x9625
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
@@ -280,6 +282,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_ME910 0x1100
+#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
@@ -645,6 +648,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
.reserved = BIT(1) | BIT(3),
};
+static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(3),
+};
+
static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2),
@@ -674,6 +682,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
.reserved = BIT(4) | BIT(5),
};
+static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
+ .reserved = BIT(1) | BIT(4),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1178,6 +1190,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ /* Yuga products use Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
+ .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1244,6 +1259,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+ .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index e3892541a489..613f91add03d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
{DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
+ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
+ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -342,6 +344,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
break;
case 2:
dev_dbg(dev, "NMEA GPS interface found\n");
+ sendsetup = true;
break;
case 3:
dev_dbg(dev, "Modem port found\n");
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index a3df8ee82faf..e31a6f204397 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -149,8 +149,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
* step 1?
*/
if (ud->tcp_socket) {
- dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
- ud->tcp_socket);
+ dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
}
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 4f48b306713f..c31c8402a0c5 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -237,11 +237,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
struct stub_priv *priv;
struct urb *urb;
- dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
+ dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
while ((priv = stub_priv_pop(sdev))) {
urb = priv->urb;
- dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
+ dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
+ priv->seqnum);
usb_kill_urb(urb);
kmem_cache_free(stub_priv_cache, priv);
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 493ac2928391..6c5a59313999 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -211,9 +211,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
continue;
- dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
- priv->urb);
-
/*
* This matched urb is not completed yet (i.e., be in
* flight in usb hcd hardware/driver). Now we are
@@ -252,8 +249,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
ret = usb_unlink_urb(priv->urb);
if (ret != -EINPROGRESS)
dev_err(&priv->urb->dev->dev,
- "failed to unlink a urb %p, ret %d\n",
- priv->urb, ret);
+ "failed to unlink a urb # %lu, ret %d\n",
+ priv->seqnum, ret);
return 0;
}
@@ -342,14 +339,6 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
epd = &ep->desc;
- /* validate transfer_buffer_length */
- if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
- dev_err(&sdev->udev->dev,
- "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
- pdu->u.cmd_submit.transfer_buffer_length);
- return -1;
- }
-
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
@@ -482,8 +471,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
}
/* allocate urb transfer buffer, if needed */
- if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
- pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
+ if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
priv->urb->transfer_buffer =
kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
GFP_KERNEL);
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 53172b1f6257..f0ec41a50cbc 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -88,7 +88,7 @@ void stub_complete(struct urb *urb)
/* link a urb to the queue of tx. */
spin_lock_irqsave(&sdev->priv_lock, flags);
if (sdev->ud.tcp_socket == NULL) {
- usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
+ usbip_dbg_stub_tx("ignore urb for closed connection\n");
/* It will be freed in stub_device_cleanup_urbs(). */
} else if (priv->unlinking) {
stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
@@ -190,8 +190,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb);
- usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
- pdu_header.base.seqnum, urb);
+ usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ pdu_header.base.seqnum);
usbip_header_correct_endian(&pdu_header, 1);
iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index f7978933b402..7b219d9109b4 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -317,26 +317,19 @@ int usbip_recv(struct socket *sock, void *buf, int size)
struct msghdr msg = {.msg_flags = MSG_NOSIGNAL};
int total = 0;
+ if (!sock || !buf || !size)
+ return -EINVAL;
+
iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);
usbip_dbg_xmit("enter\n");
- if (!sock || !buf || !size) {
- pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
- size);
- return -EINVAL;
- }
-
do {
- int sz = msg_data_left(&msg);
sock->sk->sk_allocation = GFP_NOIO;
result = sock_recvmsg(sock, &msg, MSG_WAITALL);
- if (result <= 0) {
- pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
- sock, buf + total, sz, result, total);
+ if (result <= 0)
goto err;
- }
total += result;
} while (msg_data_left(&msg));
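
After this change usbip_recv() rejects bad arguments before touching the iterator, and the receive loop leans entirely on iterator state: sock_recvmsg() with MSG_WAITALL is retried until msg_data_left() reports the kvec is drained. A simplified sketch of the resulting loop (the sk_allocation handling and error label are trimmed):

#include <linux/net.h>
#include <linux/uio.h>

static int recv_exact(struct socket *sock, void *buf, int size)
{
	struct kvec iov = { .iov_base = buf, .iov_len = size };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int result, total = 0;

	if (!sock || !buf || !size)
		return -EINVAL;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);

	do {
		result = sock_recvmsg(sock, &msg, MSG_WAITALL);
		if (result <= 0)
			return result;	/* error or peer closed */
		total += result;
	} while (msg_data_left(&msg));

	return total;
}
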
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 6b3278c4b72a..c3e1008aa491 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -656,9 +656,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
struct vhci_device *vdev;
unsigned long flags;
- usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
- hcd, urb, mem_flags);
-
if (portnum > VHCI_HC_PORTS) {
pr_err("invalid port number %d\n", portnum);
return -ENODEV;
@@ -822,8 +819,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct vhci_device *vdev;
unsigned long flags;
- pr_info("dequeue a urb %p\n", urb);
-
spin_lock_irqsave(&vhci->lock, flags);
priv = urb->hcpriv;
@@ -851,7 +846,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
/* tcp connection is closed */
spin_lock(&vdev->priv_lock);
- pr_info("device %p seems to be disconnected\n", vdev);
list_del(&priv->list);
kfree(priv);
urb->hcpriv = NULL;
@@ -863,8 +857,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* vhci_rx will receive RET_UNLINK and give back the URB.
* Otherwise, we give back it here.
*/
- pr_info("gives back urb %p\n", urb);
-
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&vhci->lock, flags);
@@ -892,8 +884,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unlink->unlink_seqnum = priv->seqnum;
- pr_info("device %p seems to be still connected\n", vdev);
-
/* send cmd_unlink and try to cancel the pending URB in the
* peer */
list_add_tail(&unlink->list, &vdev->unlink_tx);
@@ -975,7 +965,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
/* need this? see stub_dev.c */
if (ud->tcp_socket) {
- pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
+ pr_debug("shutdown tcp_socket %d\n", ud->sockfd);
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
}
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 90577e8b2282..112ebb90d8c9 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -23,24 +23,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
urb = priv->urb;
status = urb->status;
- usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
- urb, priv, seqnum);
+ usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
switch (status) {
case -ENOENT:
/* fall through */
case -ECONNRESET:
- dev_info(&urb->dev->dev,
- "urb %p was unlinked %ssynchronuously.\n", urb,
- status == -ENOENT ? "" : "a");
+ dev_dbg(&urb->dev->dev,
+ "urb seq# %u was unlinked %ssynchronuously\n",
+ seqnum, status == -ENOENT ? "" : "a");
break;
case -EINPROGRESS:
/* no info output */
break;
default:
- dev_info(&urb->dev->dev,
- "urb %p may be in a error, status %d\n", urb,
- status);
+ dev_dbg(&urb->dev->dev,
+ "urb seq# %u may be in a error, status %d\n",
+ seqnum, status);
}
list_del(&priv->list);
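
For context on the "%ssynchron..." format trick above: the USB core completes URBs unlinked via usb_kill_urb() synchronously with -ENOENT, and URBs unlinked via usb_unlink_urb() asynchronously with -ECONNRESET, so prefixing "a" for the latter yields "asynchronously". A minimal decoder of the same convention:

/* Illustrative only; mirrors the status convention documented for
 * usb_kill_urb() (-ENOENT) vs usb_unlink_urb() (-ECONNRESET). */
static const char *unlink_kind(int status)
{
	switch (status) {
	case -ENOENT:
		return "synchronously";		/* usb_kill_urb() path */
	case -ECONNRESET:
		return "asynchronously";	/* usb_unlink_urb() path */
	default:
		return "not unlinked";
	}
}
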
@@ -67,8 +66,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
spin_unlock_irqrestore(&vdev->priv_lock, flags);
if (!urb) {
- pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
- pr_info("max seqnum %d\n",
+ pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+ pdu->base.seqnum,
atomic_read(&vhci_hcd->seqnum));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
@@ -91,7 +90,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+ usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
spin_lock_irqsave(&vhci->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
@@ -158,7 +157,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
pr_info("the urb (seqnum %d) was already given back\n",
pdu->base.seqnum);
} else {
- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+ usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
/* If unlink is successful, status is -ECONNRESET */
urb->status = pdu->u.ret_unlink.status;
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index d625a2ff4b71..9aed15a358b7 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -69,7 +69,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
memset(&msg, 0, sizeof(msg));
memset(&iov, 0, sizeof(iov));
- usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
+ usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
+ priv->seqnum);
/* 1. setup usbip_header */
setup_cmd_submit_pdu(&pdu_header, urb);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c7bdeb655646..7baa90abe097 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -89,7 +89,7 @@ struct vhost_net_ubuf_ref {
#define VHOST_RX_BATCH 64
struct vhost_net_buf {
- struct sk_buff **queue;
+ void **queue;
int tail;
int head;
};
@@ -108,7 +108,7 @@ struct vhost_net_virtqueue {
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
- struct skb_array *rx_array;
+ struct ptr_ring *rx_ring;
struct vhost_net_buf rxq;
};
@@ -158,7 +158,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
struct vhost_net_buf *rxq = &nvq->rxq;
rxq->head = 0;
- rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue,
+ rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
VHOST_RX_BATCH);
return rxq->tail;
}
@@ -167,13 +167,25 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
- if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) {
- skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head,
- vhost_net_buf_get_size(rxq));
+ if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
+ ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
+ vhost_net_buf_get_size(rxq),
+ __skb_array_destroy_skb);
rxq->head = rxq->tail = 0;
}
}
+static int vhost_net_buf_peek_len(void *ptr)
+{
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ return xdp->data_end - xdp->data;
+ }
+
+ return __skb_array_len_with_tag(ptr);
+}
+
static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
@@ -185,7 +197,7 @@ static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
return 0;
out:
- return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
+ return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}
static void vhost_net_buf_init(struct vhost_net_buf *rxq)
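
The queue element type widens from struct sk_buff ** to void ** because tun's tx ring now carries tagged pointers: an entry may be a plain sk_buff or a tagged xdp_buff, so consumers must dispatch on the tag (as vhost_net_buf_peek_len() above does) before interpreting it. A hedged consumer sketch under the same assumptions, with an illustrative free path:

/* Sketch only: assumes tun's tagging helpers as used in this patch.
 * Drains a ring that may hold either sk_buffs or tagged xdp_buffs. */
static void drain_ring(struct ptr_ring *ring)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(ring)) != NULL) {
		if (tun_is_xdp_buff(ptr)) {
			struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);

			put_page(virt_to_head_page(xdp->data));
		} else {
			kfree_skb(ptr);
		}
	}
}
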
@@ -583,7 +595,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
int len = 0;
unsigned long flags;
- if (rvq->rx_array)
+ if (rvq->rx_ring)
return vhost_net_buf_peek(rvq);
spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
@@ -744,7 +756,7 @@ static void handle_rx(struct vhost_net *net)
};
size_t total_len = 0;
int err, mergeable;
- s16 headcount;
+ s16 headcount, nheads = 0;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
struct socket *sock;
@@ -772,7 +784,7 @@ static void handle_rx(struct vhost_net *net)
while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
- headcount = get_rx_bufs(vq, vq->heads, vhost_len,
+ headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
&in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
@@ -790,7 +802,7 @@ static void handle_rx(struct vhost_net *net)
* they refilled. */
goto out;
}
- if (nvq->rx_array)
+ if (nvq->rx_ring)
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
@@ -844,8 +856,12 @@ static void handle_rx(struct vhost_net *net)
vhost_discard_vq_desc(vq, headcount);
goto out;
}
- vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
- headcount);
+ nheads += headcount;
+ if (nheads > VHOST_RX_BATCH) {
+ vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+ nheads);
+ nheads = 0;
+ }
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
@@ -856,6 +872,9 @@ static void handle_rx(struct vhost_net *net)
}
vhost_net_enable_vq(net, vq);
out:
+ if (nheads)
+ vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+ nheads);
mutex_unlock(&vq->mutex);
}
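
The handle_rx() hunks above batch used-ring updates: completed heads accumulate in vq->heads and vhost_add_used_and_signal_n() runs only once a batch's worth has piled up, with a final flush of any remainder at the out: label. The pattern reduced to a self-contained illustration (the names and the sink abstraction are invented for the sketch):

#define BATCH 64

struct sink {
	void (*publish)(struct sink *s, int n);	/* expensive: publish + signal */
};

static void consume_all(struct sink *s, int (*next)(void))
{
	int nheads = 0, got;

	while ((got = next()) > 0) {
		nheads += got;
		if (nheads > BATCH) {		/* flush a full batch */
			s->publish(s, nheads);
			nheads = 0;
		}
	}
	if (nheads)				/* flush the remainder */
		s->publish(s, nheads);
}
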
@@ -896,7 +915,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_net *n;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- struct sk_buff **queue;
+ void **queue;
int i;
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
@@ -908,7 +927,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return -ENOMEM;
}
- queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *),
+ queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
GFP_KERNEL);
if (!queue) {
kfree(vqs);
@@ -1046,23 +1065,23 @@ err:
return ERR_PTR(r);
}
-static struct skb_array *get_tap_skb_array(int fd)
+static struct ptr_ring *get_tap_ptr_ring(int fd)
{
- struct skb_array *array;
+ struct ptr_ring *ring;
struct file *file = fget(fd);
if (!file)
return NULL;
- array = tun_get_skb_array(file);
- if (!IS_ERR(array))
+ ring = tun_get_tx_ring(file);
+ if (!IS_ERR(ring))
goto out;
- array = tap_get_skb_array(file);
- if (!IS_ERR(array))
+ ring = tap_get_ptr_ring(file);
+ if (!IS_ERR(ring))
goto out;
- array = NULL;
+ ring = NULL;
out:
fput(file);
- return array;
+ return ring;
}
static struct socket *get_tap_socket(int fd)
@@ -1143,7 +1162,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
vq->private_data = sock;
vhost_net_buf_unproduce(nvq);
if (index == VHOST_NET_VQ_RX)
- nvq->rx_array = get_tap_skb_array(fd);
+ nvq->rx_ring = get_tap_ptr_ring(fd);
r = vhost_vq_init_access(vq);
if (r)
goto err_used;
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index d1e1d8d2b9d5..4c789e61554b 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -805,7 +805,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
pvcalls_exit();
return ret;
}
- map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
if (map2 == NULL) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
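
The one-line pvcalls change swaps GFP_KERNEL for GFP_ATOMIC, presumably because this allocation runs in a context that must not sleep: GFP_KERNEL may block to reclaim memory, GFP_ATOMIC may not. The general rule, as an illustrative sketch rather than the pvcalls locking scheme:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct entry { struct list_head list; };

static int add_entry(struct list_head *head, spinlock_t *lock)
{
	struct entry *e;

	spin_lock(lock);
	/* GFP_KERNEL could sleep under the spinlock; GFP_ATOMIC cannot */
	e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (!e) {
		spin_unlock(lock);
		return -ENOMEM;
	}
	list_add(&e->list, head);
	spin_unlock(lock);
	return 0;
}
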