Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 6
-rw-r--r--  drivers/acpi/x86/s2idle.c | 14
-rw-r--r--  drivers/atm/idt77252.c | 2
-rw-r--r--  drivers/base/core.c | 9
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 9
-rw-r--r--  drivers/block/Kconfig | 1
-rw-r--r--  drivers/block/rnbd/Kconfig | 1
-rw-r--r--  drivers/block/rnbd/README | 1
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 18
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c | 8
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 15
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 9
-rw-r--r--  drivers/dma-buf/dma-buf.c | 21
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.c | 4
-rw-r--r--  drivers/dma/idxd/sysfs.c | 4
-rw-r--r--  drivers/dma/mediatek/mtk-hsdma.c | 1
-rw-r--r--  drivers/dma/milbeaut-xdmac.c | 4
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 6
-rw-r--r--  drivers/dma/qcom/gpi.c | 10
-rw-r--r--  drivers/dma/stm32-mdma.c | 2
-rw-r--r--  drivers/dma/ti/k3-udma.c | 6
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 166
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 21
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 6
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 7
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 2
-rw-r--r--  drivers/hv/vmbus_drv.c | 2
-rw-r--r--  drivers/hwmon/amd_energy.c | 3
-rw-r--r--  drivers/hwmon/pwm-fan.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-mt65xx.c | 27
-rw-r--r--  drivers/i2c/busses/i2c-sprd.c | 8
-rw-r--r--  drivers/interconnect/imx/imx.c | 3
-rw-r--r--  drivers/interconnect/imx/imx8mq.c | 2
-rw-r--r--  drivers/interconnect/qcom/Kconfig | 23
-rw-r--r--  drivers/iommu/amd/init.c | 3
-rw-r--r--  drivers/iommu/amd/iommu.c | 3
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 2
-rw-r--r--  drivers/iommu/dma-iommu.c | 27
-rw-r--r--  drivers/iommu/intel/dmar.c | 4
-rw-r--r--  drivers/iommu/intel/iommu.c | 148
-rw-r--r--  drivers/iommu/intel/irq_remapping.c | 2
-rw-r--r--  drivers/iommu/intel/svm.c | 23
-rw-r--r--  drivers/iommu/iova.c | 8
-rw-r--r--  drivers/isdn/mISDN/Kconfig | 1
-rw-r--r--  drivers/lightnvm/Kconfig | 1
-rw-r--r--  drivers/md/bcache/features.c | 2
-rw-r--r--  drivers/md/bcache/features.h | 30
-rw-r--r--  drivers/md/bcache/super.c | 53
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c | 77
-rw-r--r--  drivers/misc/habanalabs/common/device.c | 8
-rw-r--r--  drivers/misc/habanalabs/common/firmware_if.c | 60
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs.h | 4
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_drv.c | 1
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_ioctl.c | 7
-rw-r--r--  drivers/misc/habanalabs/common/hw_queue.c | 5
-rw-r--r--  drivers/misc/habanalabs/common/pci.c | 28
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi.c | 191
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudiP.h | 7
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_coresight.c | 3
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 75
-rw-r--r--  drivers/misc/habanalabs/include/common/hl_boot_if.h | 9
-rw-r--r--  drivers/misc/pvpanic.c | 19
-rw-r--r--  drivers/net/bareudp.c | 25
-rw-r--r--  drivers/net/can/Kconfig | 1
-rw-r--r--  drivers/net/can/m_can/m_can.c | 2
-rw-r--r--  drivers/net/can/m_can/tcan4x5x.c | 26
-rw-r--r--  drivers/net/can/rcar/Kconfig | 4
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 17
-rw-r--r--  drivers/net/dsa/hirschmann/Kconfig | 1
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 34
-rw-r--r--  drivers/net/ethernet/aquantia/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 38
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 7
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 71
-rw-r--r--  drivers/net/ethernet/ethoc.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 46
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 17
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 59
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 27
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 38
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/macsonic.c | 12
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 7
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 129
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 2
-rw-r--r--  drivers/net/ipa/gsi.c | 127
-rw-r--r--  drivers/net/ipa/ipa_clock.c | 4
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 11
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 12
-rw-r--r--  drivers/net/wan/Kconfig | 1
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.c | 44
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/peer.c | 17
-rw-r--r--  drivers/net/wireless/ath/ath11k/peer.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.c | 24
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c | 3
-rw-r--r--  drivers/net/wireless/ath/wil6210/Kconfig | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/init.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/sdio.c | 19
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/core.c | 8
-rw-r--r--  drivers/nvme/host/core.c | 8
-rw-r--r--  drivers/nvme/host/fc.c | 15
-rw-r--r--  drivers/nvme/host/nvme.h | 9
-rw-r--r--  drivers/nvme/host/pci.c | 10
-rw-r--r--  drivers/nvme/host/tcp.c | 12
-rw-r--r--  drivers/nvme/target/fcloop.c | 7
-rw-r--r--  drivers/nvme/target/rdma.c | 10
-rw-r--r--  drivers/ptp/Kconfig | 2
-rw-r--r--  drivers/regulator/Kconfig | 1
-rw-r--r--  drivers/regulator/bd718x7-regulator.c | 57
-rw-r--r--  drivers/regulator/pf8x00-regulator.c | 8
-rw-r--r--  drivers/regulator/qcom-rpmh-regulator.c | 2
-rw-r--r--  drivers/s390/net/qeth_core.h | 3
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 38
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 4
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 4
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 11
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 66
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 39
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 29
-rw-r--r--  drivers/spi/spi-altera.c | 26
-rw-r--r--  drivers/spi/spi-geni-qcom.c | 84
-rw-r--r--  drivers/spi/spi-stm32.c | 4
-rw-r--r--  drivers/spi/spi.c | 11
-rw-r--r--  drivers/staging/comedi/comedi_fops.c | 4
-rw-r--r--  drivers/staging/hikey9xx/hisi-spmi-controller.c | 21
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_subdev.c | 20
-rw-r--r--  drivers/staging/mt7621-dma/mtk-hsdma.c | 4
-rw-r--r--  drivers/tty/Kconfig | 14
-rw-r--r--  drivers/tty/Makefile | 3
-rw-r--r--  drivers/tty/ttynull.c | 18
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 6
-rw-r--r--  drivers/usb/class/cdc-acm.c | 4
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 16
-rw-r--r--  drivers/usb/class/usblp.c | 21
-rw-r--r--  drivers/usb/core/hcd.c | 8
-rw-r--r--  drivers/usb/dwc3/core.h | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-meson-g12a.c | 2
-rw-r--r--  drivers/usb/dwc3/gadget.c | 16
-rw-r--r--  drivers/usb/dwc3/ulpi.c | 38
-rw-r--r--  drivers/usb/gadget/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/composite.c | 10
-rw-r--r--  drivers/usb/gadget/configfs.c | 19
-rw-r--r--  drivers/usb/gadget/function/f_printer.c | 1
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 69
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 9
-rw-r--r--  drivers/usb/gadget/legacy/acm_ms.c | 4
-rw-r--r--  drivers/usb/gadget/udc/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/udc/Makefile | 1
-rw-r--r--  drivers/usb/gadget/udc/core.c | 3
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 35
-rw-r--r--  drivers/usb/gadget/udc/fsl_mxc_udc.c | 122
-rw-r--r--  drivers/usb/host/xhci.c | 24
-rw-r--r--  drivers/usb/misc/yurex.c | 3
-rw-r--r--  drivers/usb/serial/iuu_phoenix.c | 20
-rw-r--r--  drivers/usb/serial/option.c | 3
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/usb/typec/altmodes/Kconfig | 2
-rw-r--r--  drivers/usb/typec/class.c | 2
-rw-r--r--  drivers/usb/typec/mux/intel_pmc_mux.c | 11
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 2
-rw-r--r--  drivers/vhost/net.c | 6
-rw-r--r--  drivers/vhost/vsock.c | 68
253 files changed, 2387 insertions, 1389 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index edf1558c1105..ebcf534514be 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -395,9 +395,6 @@ config ACPI_CONTAINER
This helps support hotplug of nodes, CPUs, and memory.
- To compile this driver as a module, choose M here:
- the module will be called container.
-
config ACPI_HOTPLUG_MEMORY
bool "Memory Hotplug"
depends on MEMORY_HOTPLUG
@@ -411,9 +408,6 @@ config ACPI_HOTPLUG_MEMORY
removing memory devices at runtime, you need not enable
this driver.
- To compile this driver as a module, choose M here:
- the module will be called acpi_memhotplug.
-
config ACPI_HOTPLUG_IOAPIC
bool
depends on PCI
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 25fea34b544c..2b69536cdccb 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -105,18 +105,8 @@ static void lpi_device_get_constraints_amd(void)
for (i = 0; i < out_obj->package.count; i++) {
union acpi_object *package = &out_obj->package.elements[i];
- struct lpi_device_info_amd info = { };
- if (package->type == ACPI_TYPE_INTEGER) {
- switch (i) {
- case 0:
- info.revision = package->integer.value;
- break;
- case 1:
- info.count = package->integer.value;
- break;
- }
- } else if (package->type == ACPI_TYPE_PACKAGE) {
+ if (package->type == ACPI_TYPE_PACKAGE) {
lpi_constraints_table = kcalloc(package->package.count,
sizeof(*lpi_constraints_table),
GFP_KERNEL);
@@ -135,12 +125,10 @@ static void lpi_device_get_constraints_amd(void)
for (k = 0; k < info_obj->package.count; ++k) {
union acpi_object *obj = &info_obj->package.elements[k];
- union acpi_object *obj_new;
list = &lpi_constraints_table[lpi_constraints_table_size];
list->min_dstate = -1;
- obj_new = &obj[k];
switch (k) {
case 0:
dev_info.enabled = obj->integer.value;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 65a3886f68c9..5f0472c18bcb 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
- return err;
+ goto err_out_disable_pdev;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 25e08e5f40bd..14f165816742 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -4414,6 +4414,12 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*
* Set the device's firmware node pointer to @fwnode, but if a secondary
* firmware node of the device is present, preserve it.
+ *
+ * Valid fwnode cases are:
+ * - primary --> secondary --> -ENODEV
+ * - primary --> NULL
+ * - secondary --> -ENODEV
+ * - NULL
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
@@ -4432,8 +4438,9 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
+ /* Set fn->secondary = NULL, so fn remains the primary fwnode */
if (!(parent && fn == parent->fwnode))
- fn->secondary = ERR_PTR(-ENODEV);
+ fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 8dfac7f3ed7a..ff2ee87987c7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
devname = dev_name(map->dev);
if (name) {
- map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ if (!map->debugfs_name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
+ if (!map->debugfs_name)
+ return;
+ }
name = map->debugfs_name;
} else {
name = devname;
@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
-
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
+ if (!map->debugfs_name)
+ return;
name = map->debugfs_name;
dummy_index++;
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 262326973ee0..583b671b1d2d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -445,6 +445,7 @@ config BLK_DEV_RBD
config BLK_DEV_RSXX
tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
depends on PCI
+ select CRC32
help
Device driver for IBM's high speed PCIe SSD
storage device: Flash Adapter 900GB Full Height.
diff --git a/drivers/block/rnbd/Kconfig b/drivers/block/rnbd/Kconfig
index 4b6d3d816d1f..2ff05a0d2646 100644
--- a/drivers/block/rnbd/Kconfig
+++ b/drivers/block/rnbd/Kconfig
@@ -7,6 +7,7 @@ config BLK_DEV_RNBD_CLIENT
tristate "RDMA Network Block Device driver client"
depends on INFINIBAND_RTRS_CLIENT
select BLK_DEV_RNBD
+ select SG_POOL
help
RNBD client is a network block device driver using rdma transport.
diff --git a/drivers/block/rnbd/README b/drivers/block/rnbd/README
index 1773c0aa0bd4..080f58a5400a 100644
--- a/drivers/block/rnbd/README
+++ b/drivers/block/rnbd/README
@@ -90,3 +90,4 @@ Kleber Souza <kleber.souza@profitbricks.com>
Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
Milind Dumbare <Milind.dumbare@gmail.com>
Roman Penyaev <roman.penyaev@profitbricks.com>
+Swapnil Ingle <ingleswapnil@gmail.com>
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 96e3f9fe8241..45a470076652 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -375,12 +375,19 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
init_waitqueue_head(&iu->comp.wait);
iu->comp.errno = INT_MAX;
+ if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+ rnbd_put_permit(sess, permit);
+ kfree(iu);
+ return NULL;
+ }
+
return iu;
}
static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
if (atomic_dec_and_test(&iu->refcount)) {
+ sg_free_table(&iu->sgt);
rnbd_put_permit(sess, iu->permit);
kfree(iu);
}
@@ -487,8 +494,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
iu->buf = NULL;
iu->dev = dev;
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
-
msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
msg.device_id = cpu_to_le32(device_id);
@@ -502,7 +507,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -575,7 +579,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
iu->buf = rsp;
iu->dev = dev;
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
@@ -594,7 +597,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -622,8 +624,6 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
iu->buf = rsp;
iu->sess = sess;
-
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
@@ -650,7 +650,6 @@ put_iu:
} else {
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -1698,7 +1697,8 @@ static void rnbd_destroy_sessions(void)
*/
list_for_each_entry_safe(sess, sn, &sess_list, list) {
- WARN_ON(!rnbd_clt_get_sess(sess));
+ if (!rnbd_clt_get_sess(sess))
+ continue;
close_rtrs(sess);
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index b8e44331e494..a6a68d44f517 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -338,10 +338,12 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
{
- mutex_lock(&sess_dev->sess->lock);
- rnbd_srv_destroy_dev_session_sysfs(sess_dev);
- mutex_unlock(&sess_dev->sess->lock);
+ struct rnbd_srv_session *sess = sess_dev->sess;
+
sess_dev->keep_id = true;
+ mutex_lock(&sess->lock);
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&sess->lock);
}
static int process_msg_close(struct rtrs_srv *rtrs,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1a660466dd75..be05e038d956 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -76,11 +76,6 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
-static inline int32_t percent_fp(int percent)
-{
- return div_fp(percent, 100);
-}
-
static inline u64 mul_ext_fp(u64 x, u64 y)
{
return (x * y) >> EXT_FRAC_BITS;
@@ -91,11 +86,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
return div64_u64(x << EXT_FRAC_BITS, y);
}
-static inline int32_t percent_ext_fp(int percent)
-{
- return div_ext_fp(percent, 100);
-}
-
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
@@ -2653,12 +2643,13 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
unsigned long capacity)
{
struct cpudata *cpu = all_cpu_data[cpunum];
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
update_turbo_state();
- cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
- cpu->pstate.turbo_pstate;
+ cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0acc9e241cd7..b9ccb6a3dad9 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
- unsigned int index)
+ unsigned int index,
+ struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy;
u32 fid = 0;
u32 vid = 0;
int res;
@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- policy = cpufreq_cpu_get(smp_processor_id());
- cpufreq_cpu_put(policy);
-
cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
powernow_k8_acpi_pst_values(data, newstate);
- ret = transition_frequency_fidvid(data, newstate);
+ ret = transition_frequency_fidvid(data, newstate, pol);
if (ret) {
pr_err("transition frequency failed\n");
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e63684d4cd90..9ad6397aaa97 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
dmabuf->ops->release(dmabuf);
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
-
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
kfree(dmabuf);
}
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+
+ return 0;
+}
+
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index b971505b8715..08d71dafa001 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
if (desc->chunk) {
/* Create and add new element into the linked list */
- desc->chunks_alloc++;
- list_add_tail(&chunk->list, &desc->chunk->list);
if (!dw_edma_alloc_burst(chunk)) {
kfree(chunk);
return NULL;
}
+ desc->chunks_alloc++;
+ list_add_tail(&chunk->list, &desc->chunk->list);
} else {
/* List head */
chunk->burst = NULL;
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 266423a2cabc..4dbb03c545e4 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -434,7 +434,7 @@ int idxd_register_driver(void)
return 0;
drv_fail:
- for (; i > 0; i--)
+ while (--i >= 0)
driver_unregister(&idxd_drvs[i]->drv);
return rc;
}
@@ -1840,7 +1840,7 @@ int idxd_register_bus_type(void)
return 0;
bus_err:
- for (; i > 0; i--)
+ while (--i >= 0)
bus_unregister(idxd_bus_types[i]);
return rc;
}
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index f133ae8dece1..6ad8afbb95f2 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
err_free:
+ mtk_hsdma_hw_deinit(hsdma);
of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index 584c931e807a..d29d01e730aa 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -350,7 +350,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
ret = dma_async_device_register(ddev);
if (ret)
- return ret;
+ goto disable_xdmac;
ret = of_dma_controller_register(dev->of_node,
of_dma_simple_xlate, mdev);
@@ -363,6 +363,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
unregister_dmac:
dma_async_device_unregister(ddev);
+disable_xdmac:
+ disable_xdmac(mdev);
return ret;
}
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d5773d474d8f..88579857ca1d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -630,7 +630,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
GFP_NOWAIT);
if (!async_desc)
- goto err_out;
+ return NULL;
if (flags & DMA_PREP_FENCE)
async_desc->flags |= DESC_FLAG_NWD;
@@ -670,10 +670,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
}
return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
-
-err_out:
- kfree(async_desc);
- return NULL;
}
/**
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index d2334f535de2..1a0bf6b0567a 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1416,7 +1416,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
len = 1 << bit;
ring->alloc_size = (len + (len - 1));
dev_dbg(gpii->gpi_dev->dev,
- "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+ "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
elements, el_size, (elements * el_size), len,
ring->alloc_size);
@@ -1424,7 +1424,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
ring->alloc_size,
&ring->dma_handle, GFP_KERNEL);
if (!ring->pre_aligned) {
- dev_err(gpii->gpi_dev->dev, "could not alloc size:%lu mem for ring\n",
+ dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
ring->alloc_size);
return -ENOMEM;
}
@@ -1444,8 +1444,8 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
smp_wmb();
dev_dbg(gpii->gpi_dev->dev,
- "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
- ring->dma_handle, ring->phys_addr, ring->len,
+ "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
+ &ring->dma_handle, &ring->phys_addr, ring->len,
ring->el_size, ring->elements);
return 0;
@@ -1948,7 +1948,7 @@ static int gpi_ch_init(struct gchan *gchan)
return ret;
error_start_chan:
- for (i = i - 1; i >= 0; i++) {
+ for (i = i - 1; i >= 0; i--) {
gpi_stop_chan(&gpii->gchan[i]);
gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
}
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index e4637ec786d3..36ba8b43e78d 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -199,7 +199,7 @@
#define STM32_MDMA_MAX_CHANNELS 63
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
-#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
+#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
enum stm32_mdma_trigger_mode {
STM32_MDMA_BUFFER,
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 87157cbae1b8..298460438bb4 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4698,9 +4698,9 @@ static int pktdma_setup_resources(struct udma_dev *ud)
ud->tchan_tpl.levels = 1;
}
- ud->tchan_tpl.levels = ud->tchan_tpl.levels;
- ud->tchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
- ud->tchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
sizeof(unsigned long), GFP_KERNEL);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 22faea653ea8..79777550a6ff 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
has_dre = false;
if (!has_dre)
- xdev->common.copy_align = fls(width - 1);
+ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- int ret, i, nr_channels = 1;
+ int ret, i;
+ u32 nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
/* Register the DMA engine with the core */
- dma_async_device_register(&xdev->common);
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error;
+ }
err = of_dma_controller_register(node, of_dma_xilinx_xlate,
xdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1cb7d73f7317..b69c34074d8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2548,11 +2548,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
- amdgpu_amdkfd_device_fini(adev);
-
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ amdgpu_amdkfd_device_fini(adev);
+
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 523d22db094b..347fec669424 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -563,7 +563,7 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1315,8 +1315,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->hdcp_context.hdcp_initialized)
- return 0;
+ if (!psp->hdcp_context.hdcp_initialized) {
+ if (psp->hdcp_context.hdcp_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_hdcp_unload(psp);
if (ret)
@@ -1324,6 +1328,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
psp->hdcp_context.hdcp_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1462,8 +1467,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->dtm_context.dtm_initialized)
- return 0;
+ if (!psp->dtm_context.dtm_initialized) {
+ if (psp->dtm_context.dtm_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_dtm_unload(psp);
if (ret)
@@ -1471,6 +1480,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
psp->dtm_context.dtm_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,
@@ -2589,11 +2599,10 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
- psp->asd_fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd_fw_version = le32_to_cpu(desc->fw_version);
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
- psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
- psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c136bd449744..82e952696d24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1518,7 +1518,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
int i = 0;
- int ret = 0;
+ int ret = 0, status;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
@@ -1543,12 +1543,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
- ret = amdgpu_vram_mgr_query_page_status(
+ status = amdgpu_vram_mgr_query_page_status(
ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
data->bps[i].retired_page);
- if (ret == -EBUSY)
+ if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (ret == -ENOENT)
+ else if (status == -ENOENT)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 1dd040166c63..19d9aa76cfbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -30,6 +30,7 @@
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
+#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID 0xA0
/*
* The 2 macros bellow represent the actual size in bytes that
@@ -62,7 +63,8 @@
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
- (adev->asic_type == CHIP_ARCTURUS))
+ (adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type == CHIP_SIENNA_CICHLID))
return true;
return false;
@@ -100,6 +102,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+ case CHIP_SIENNA_CICHLID:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
+ break;
+
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index b72c8e4ca36b..07104a1de308 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -310,7 +310,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 797b5d4b43e5..e509a175ed17 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 519080e9a233..146486071d01 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2386,8 +2386,7 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
- aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
- drm_connector_list_update(connector);
+ drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -8379,8 +8378,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
}
-#ifdef CONFIG_DEBUG_FS
- if (new_crtc_state->active &&
+ if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
/**
* Frontend may have changed so reapply the CRC capture
@@ -8401,7 +8399,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
}
-#endif
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 0235bfb246e5..eba2f1d35d07 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -46,13 +46,13 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
}
/* amdgpu_dm_crc.c */
-#ifdef CONFIG_DEBUG_FS
bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
struct dm_crtc_state *dm_old_crtc_state);
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source);
+#ifdef CONFIG_DEBUG_FS
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *src_name,
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 64f515d74410..f3c00f479e1c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
calcs_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-calcs_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d59b380e7b7f..ff96bee57bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -104,13 +104,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@@ -125,13 +118,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@@ -146,13 +132,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9e1071b2181f..f4a2088ab179 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2487,9 +2487,14 @@ enum dc_status dc_link_validate_mode_timing(
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
- struct dc *dc = link->ctx->dc;
+ struct dc *dc = NULL;
struct abm *abm = NULL;
+ if (!link || !link->ctx)
+ return NULL;
+
+ dc = link->ctx->dc;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx.stream;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 733e6e6e43bd..62ad1a11bff9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -31,11 +31,4 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
-# fix:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# aarch64 does not support soft-float, so use hard-float and handle this in code
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
-endif
-
AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index bdc37831535e..36745193c391 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1534,15 +1534,8 @@ static bool dcn10_resource_construct(
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
-#if defined(CONFIG_ARM64)
- /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
- DC_FP_START();
- dcn10_resource_construct_fp(dc);
- DC_FP_END();
-#else
/* Other architectures we build for build this with soft-float */
dcn10_resource_construct_fp(dc);
-#endif
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 624cb1341ef1..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -17,10 +17,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 1ee5fc03b7b3..bb8c95141082 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -13,10 +13,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index 248c2711aace..c20331eb62e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -41,11 +41,6 @@ CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 2fd5d34e4ba6..3ca7d911d25c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -21,10 +21,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
index 36e44e1b07fa..8d4924b7dc22 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
@@ -20,10 +20,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index a02a33dcd70b..6bb7f2905821 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
dml_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dml_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index f2624a1156e5..8d31eb75c6a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -10,10 +10,6 @@ ifdef CONFIG_PPC64
dsc_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dsc_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 95cb56929e79..126c2f3a4dd3 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -55,10 +55,6 @@
#include <asm/fpu/api.h>
#define DC_FP_START() kernel_fpu_begin()
#define DC_FP_END() kernel_fpu_end()
-#elif defined(CONFIG_ARM64)
-#include <asm/neon.h>
-#define DC_FP_START() kernel_neon_begin()
-#define DC_FP_END() kernel_neon_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#include <asm/cputable.h>
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index e57e64bbacdc..88322781e447 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -251,7 +251,7 @@ static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cl
smu10_data->gfx_actual_soft_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- smu10_data->gfx_actual_soft_min_freq,
+ clock,
NULL);
}
return 0;
@@ -558,7 +558,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
-
+ /* disabled fine grain tuning function by default */
+ data->fine_grain_enabled = 0;
return result;
}
@@ -597,6 +598,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
+ uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
@@ -613,6 +615,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_max_freq_limit/100,
@@ -648,6 +658,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -658,6 +676,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
min_mclk,
@@ -668,6 +694,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
SMU10_UMD_PSTATE_GFXCLK,
@@ -703,6 +737,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -741,6 +783,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_min_freq_limit/100,
@@ -759,6 +809,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
+ data->fine_grain_enabled = 1;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
@@ -948,6 +999,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
+ uint32_t min_freq, max_freq = 0;
+ uint32_t ret = 0;
switch (type) {
case PP_SCLK:
@@ -983,18 +1036,28 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
- (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
- size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
+ (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- uint32_t min_freq, max_freq = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
@@ -1414,23 +1477,96 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
+ uint32_t min_freq, max_freq = 0;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
if (!hwmgr->od_enabled) {
pr_err("Fine grain not support\n");
return -EINVAL;
}
- if (size != 2) {
- pr_err("Input parameter number not correct\n");
+ if (!smu10_data->fine_grain_enabled) {
+ pr_err("Fine grain not started\n");
return -EINVAL;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
- if (input[0] == 0)
- smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
- else if (input[0] == 1)
- smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
- else
+ if (size != 2) {
+ pr_err("Input parameter number not correct\n");
return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (input[1] < min_freq) {
+ pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], min_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_min_freq = input[1];
+ } else if (input[0] == 1) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (input[1] > max_freq) {
+ pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ } else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+ smu10_data->gfx_actual_soft_min_freq = min_freq;
+ smu10_data->gfx_actual_soft_max_freq = max_freq;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
+ pr_err("The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ smu10_data->gfx_actual_soft_min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ smu10_data->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
}
return 0;
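
For reference, the fine-grain path above is driven from userspace through amdgpu's pp_od_clk_voltage sysfs file, using "s <index> <MHz>" to stage a limit, "r" to restore defaults and "c" to commit. A minimal usage sketch, assuming that standard interface and an example card0 path (not part of the patch):

/* Illustrative userspace sketch: exercise the fine-grain OD interface
 * handled by smu10_set_fine_grain_clk_vol(). Card path and frequencies
 * are example values only. */
#include <stdio.h>
#include <stdlib.h>

static int od_write(const char *cmd)
{
	/* Path is an assumption; adjust cardN for the target GPU. */
	FILE *f = fopen("/sys/class/drm/card0/device/pp_od_clk_voltage", "w");

	if (!f)
		return -1;
	fputs(cmd, f);		/* one command per write */
	return fclose(f);
}

int main(void)
{
	/* Matches the PP_OD_EDIT_SCLK_VDDC_TABLE handling: index 0 = min, 1 = max. */
	if (od_write("s 0 600\n") || od_write("s 1 1400\n"))
		return EXIT_FAILURE;
	/* "c" triggers PP_OD_COMMIT_DPM_TABLE; "r" would restore defaults. */
	return od_write("c\n") ? EXIT_FAILURE : EXIT_SUCCESS;
}
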
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
index 6c9b5f060902..808e0ecbe1f0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
@@ -283,6 +283,7 @@ struct smu10_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
+ uint32_t gfx_actual_soft_max_freq;
uint32_t gfx_min_freq_limit;
uint32_t gfx_max_freq_limit; /* in 10Khz*/
@@ -299,6 +300,8 @@ struct smu10_hwmgr {
bool need_min_deep_sleep_dcefclk;
uint32_t deep_sleep_dcefclk;
uint32_t num_active_display;
+
+ bool fine_grain_enabled;
};
struct pp_hwmgr;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9608745d732f..12b36eb0ff6a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2372,7 +2372,7 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t *req, bool write,
{
int i;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = 1;
req->I2CSpeed = 2;
req->SlaveAddress = address;
req->NumCmds = numbytes;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 8cb4fcee9a2c..5c1482d4ca43 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -252,7 +252,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->CurrentSocketPower;
+ *value = (metrics->CurrentSocketPower << 8) /
+ 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->GfxTemperature / 100 *
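
The conversion above appears to turn the SMU's socket-power metric into 8.8 fixed-point watts, assuming CurrentSocketPower is reported in milliwatts. A tiny standalone illustration of that arithmetic, with example values only:

/* Illustration: (mW << 8) / 1000 yields watts in Q8.8 fixed point,
 * assuming the SMU reports CurrentSocketPower in milliwatts. */
#include <stdio.h>

int main(void)
{
	unsigned int current_socket_power_mw = 15000;	/* example: 15 W */
	unsigned int q8_8 = (current_socket_power_mw << 8) / 1000;

	printf("raw=%u -> %u.%02u W\n", q8_8, q8_8 >> 8,
	       (q8_8 & 0xff) * 100 / 256);	/* prints raw=3840 -> 15.00 W */
	return 0;
}
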
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index dc75db8af371..f743685a20e8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -188,6 +188,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level].Freq;
break;
+ case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= NUM_FCLK_DPM_LEVELS)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 522d55004655..06abf2a7ce9e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -225,6 +225,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
break;
case SMU_FCLK:
case SMU_MCLK:
+ case SMU_UCLK:
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ce82d654d0f2..34d78c654df3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1436,6 +1436,9 @@ struct intel_dp {
bool ycbcr_444_to_420;
} dfp;
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
/* Display stream compression testing */
bool force_dsc_en;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2165398d2c7c..37f1a10fd021 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1489,7 +1489,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
- cpu_latency_qos_update_request(&i915->pm_qos, 0);
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
intel_dp_check_edp(intel_dp);
@@ -1622,7 +1622,7 @@ done:
ret = recv_bytes;
out:
- cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
@@ -1898,6 +1898,9 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
+ if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+ cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
kfree(intel_dp->aux.name);
}
@@ -1950,6 +1953,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
+ cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
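
The hunks above, together with the i915_drv.c/i915_drv.h changes below, move the CPU latency QoS request from the device into each intel_dp, so only AUX transfers pin wakeup latency. A compile-time sketch of that per-object pattern using the generic cpu_latency_qos_* helpers; the my_aux names are placeholders, not i915 code:

/* Sketch (kernel context) of the per-object latency-QoS lifecycle. */
#include <linux/pm_qos.h>

struct my_aux {
	struct pm_qos_request pm_qos;
};

static void my_aux_init(struct my_aux *aux)
{
	/* Default value: no latency constraint until a transfer runs. */
	cpu_latency_qos_add_request(&aux->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static void my_aux_xfer(struct my_aux *aux)
{
	cpu_latency_qos_update_request(&aux->pm_qos, 0);	/* pin low latency */
	/* ... interrupt-driven transfer ... */
	cpu_latency_qos_update_request(&aux->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static void my_aux_fini(struct my_aux *aux)
{
	if (cpu_latency_qos_request_active(&aux->pm_qos))
		cpu_latency_qos_remove_request(&aux->pm_qos);
}
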
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index bcc80f428172..bd3046e5a934 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_pool;
}
+ memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 180c23e2e25e..602f1a0bc587 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), huc_def(icl, 9, 0, 0)) \
fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 93265951fdbb..b0899b665e85 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
- unsigned long x, n;
+ unsigned long x, n, remain;
void *ptr;
/*
@@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
+ remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
- length = round_up(length,
+ remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
- for (n = offset >> PAGE_SHIFT; length; n++) {
- int len = min(length, PAGE_SIZE - x);
+ for (n = offset >> PAGE_SHIFT; remain; n++) {
+ int len = min(remain, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
kunmap_atomic(src);
ptr += len;
- length -= len;
+ remain -= len;
x = 0;
}
}
i915_gem_object_unpin_pages(src_obj);
+ memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
/* dst_obj is returned with vmap pinned */
return dst;
}
@@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
#define LENGTH_BIAS 2
-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
/**
* intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
@@ -1538,16 +1536,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
ret = 0; /* allow execution */
}
}
-
- if (shadow_needs_clflush(shadow->obj))
- drm_clflush_virt_range(batch_end, 8);
}
- if (shadow_needs_clflush(shadow->obj)) {
- void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
- drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
- }
+ i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 320856b665a1..88ad754962af 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -578,8 +578,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -626,7 +624,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -648,8 +645,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
-
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a3ee4f9dc0a..632c713227dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -891,9 +891,6 @@ struct drm_i915_private {
bool display_irqs_enabled;
- /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
- struct pm_qos_request pm_qos;
-
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 7e82c41a85f1..bdc989183c64 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -534,8 +534,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
return gpu;
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 93da6683a866..4534633fe7cd 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -564,8 +564,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index c0be3a0f36b2..82bebb40234d 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -692,8 +692,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 87c8b033ad1a..12e75ba360f9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -18,6 +18,10 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(2, 0, 0, 0),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6cf9975e951e..f09175698827 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,8 +191,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct io_pgtable_domain_attr pgtbl_cfg;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -202,13 +200,18 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;
- /*
- * This allows GPU to set the bus attributes required to use system
- * cache on behalf of the iommu page table walker.
- */
- if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+
+ if (adreno_is_a6xx(adreno_gpu)) {
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct io_pgtable_domain_attr pgtbl_cfg;
+ /*
+ * This allows GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+ if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+ }
}
mmu = msm_iommu_new(&pdev->dev, iommu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c3775f79525a..b3d9a333591b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -18,6 +18,7 @@
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -211,6 +212,11 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
+static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
+{
+ return gpu->revn > 599 && gpu->revn < 700;
+}
+
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 6e971d552911..3bc7ed21de28 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -693,6 +693,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
+ if (state == ST_CONNECT_PENDING) {
+ /* wait until ST_CONNECTED */
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 97dca3e378b7..d1780bcac8cc 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -167,12 +167,18 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
rc = dp_panel_read_dpcd(dp_panel);
+ if (rc) {
+ DRM_ERROR("read dpcd failed %d\n", rc);
+ return rc;
+ }
+
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
- if (rc || !is_link_rate_valid(bw_code) ||
+ if (!is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
- DRM_ERROR("read dpcd failed %d\n", rc);
- return rc;
+ DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
+ dp_panel->link_info.num_lanes);
+ return -EINVAL;
}
if (dp_panel->dfp_present) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 535a0263ceeb..108c405e03dd 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -457,14 +457,14 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_mode_config_init(ddev);
- /* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
- ret = msm_init_vram(ddev);
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_msm_uninit;
+ goto err_destroy_mdss;
dma_set_max_seg_size(dev, UINT_MAX);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 9a7c49bc394f..9d10739c4eb2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -96,6 +96,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!msm_gem_is_locked(obj));
+
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
@@ -988,6 +990,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
if (msm_obj->pages)
kvfree(msm_obj->pages);
+ put_iova_vmas(obj);
+
/* dma_buf_detach() grabs resv lock, so we need to unlock
* prior to drm_prime_gem_destroy
*/
@@ -997,11 +1001,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
} else {
msm_gem_vunmap(obj);
put_pages(obj);
+ put_iova_vmas(obj);
msm_gem_unlock(obj);
}
- put_iova_vmas(obj);
-
drm_gem_object_release(obj);
kfree(msm_obj);
@@ -1115,6 +1118,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
struct msm_gem_vma *vma;
struct page **pages;
+ drm_gem_private_object_init(dev, obj, size);
+
msm_gem_lock(obj);
vma = add_vma(obj, NULL);
@@ -1126,9 +1131,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
- drm_gem_private_object_init(dev, obj, size);
-
+ msm_gem_lock(obj);
pages = get_pages(obj);
+ msm_gem_unlock(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d59ef6e92a40..23195d5d4e91 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -730,9 +730,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
rdev->mman.initialized = true;
- ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
- dma_addressing_limited(&rdev->pdev->dev));
-
r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 7b2f60616750..a00b7ab9c14c 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -507,7 +507,6 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
-EXPORT_SYMBOL(ttm_pool_init);
/**
* ttm_pool_fini - Cleanup a pool
@@ -525,7 +524,6 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
-EXPORT_SYMBOL(ttm_pool_fini);
#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 502f8cd95f6d..d491fdcee61f 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2550,7 +2550,6 @@ static void hv_kexec_handler(void)
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
- hyperv_cleanup();
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -2566,7 +2565,6 @@ static void hv_crash_handler(struct pt_regs *regs)
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_disable_regs(cpu);
- hyperv_cleanup();
};
static int hv_synic_suspend(void)
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
index 9b306448b7a0..822c2e74b98d 100644
--- a/drivers/hwmon/amd_energy.c
+++ b/drivers/hwmon/amd_energy.c
@@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
*/
cpus = num_present_cpus() / num_siblings;
- s_config = devm_kcalloc(dev, cpus + sockets,
+ s_config = devm_kcalloc(dev, cpus + sockets + 1,
sizeof(u32), GFP_KERNEL);
if (!s_config)
return -ENOMEM;
@@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
}
+ s_config[i] = 0;
return 0;
}
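
The extra element and the trailing s_config[i] = 0 follow the hwmon convention that a channel-config array ends with a zero terminator. A small standalone sketch of that layout, with made-up channel counts:

/* Illustration of a zero-terminated hwmon-style config array
 * (cpus/sockets values are examples, not from the patch). */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int cpus = 64, sockets = 2, i;
	/* One extra slot so the array can end with a 0 terminator. */
	unsigned int *s_config = calloc(cpus + sockets + 1, sizeof(*s_config));

	if (!s_config)
		return EXIT_FAILURE;
	for (i = 0; i < cpus + sockets; i++)
		s_config[i] = 0x1;		/* placeholder channel flags */
	s_config[i] = 0;			/* terminator expected by the hwmon core */
	printf("%u entries + terminator\n", i);
	free(s_config);
	return EXIT_SUCCESS;
}
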
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 777439f48c14..111a91dc6b79 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -334,8 +334,18 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm_value = MAX_PWM;
- /* Set duty cycle to maximum allowed and enable PWM output */
pwm_init_state(ctx->pwm, &state);
+ /*
+ * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+ * long. Check this here to keep the fan from running at too low a
+ * frequency.
+ */
+ if (state.period > ULONG_MAX / MAX_PWM + 1) {
+ dev_err(dev, "Configured period too big\n");
+ return -EINVAL;
+ }
+
+ /* Set duty cycle to maximum allowed and enable PWM output */
state.duty_cycle = ctx->pwm->args.period - 1;
state.enabled = true;
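
The added check follows from requiring MAX_PWM * (period - 1) <= ULONG_MAX, which rearranges to period <= ULONG_MAX / MAX_PWM + 1; anything larger is rejected. A minimal standalone verification of that bound, assuming the driver's MAX_PWM of 255:

/* Standalone illustration of the overflow bound used above: reject
 * period when MAX_PWM * (period - 1) would not fit in unsigned long. */
#include <limits.h>
#include <stdio.h>

#define MAX_PWM 255	/* value used by pwm-fan */

static int period_too_big(unsigned long long period)
{
	return period > ULONG_MAX / MAX_PWM + 1;
}

int main(void)
{
	unsigned long long ok = ULONG_MAX / MAX_PWM + 1;

	printf("%d %d\n", period_too_big(ok), period_too_big(ok + 1)); /* 0 1 */
	return 0;
}
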
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ae90713443fa..877fe3733a42 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1449,7 +1449,7 @@ static int i801_add_mux(struct i801_priv *priv)
/* Register GPIO descriptor lookup table */
lookup = devm_kzalloc(dev,
- struct_size(lookup, table, mux_config->n_gpios),
+ struct_size(lookup, table, mux_config->n_gpios + 1),
GFP_KERNEL);
if (!lookup)
return -ENOMEM;
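
The +1 reserves a zero-filled entry at the end of the lookup table, which, since devm_kzalloc zeroes the allocation, acts as the terminator the gpiolib lookup code stops at. A reduced kernel-context sketch of the same allocation pattern; alloc_mux_lookup and n_gpios are placeholder names:

/* Sketch (kernel context): allocate a gpiod lookup table with one spare,
 * zeroed entry acting as terminator. */
#include <linux/device.h>
#include <linux/gpio/machine.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct gpiod_lookup_table *alloc_mux_lookup(struct device *dev,
						   unsigned int n_gpios)
{
	struct gpiod_lookup_table *lookup;

	/* The +1 entry stays all-zero and terminates the table. */
	lookup = devm_kzalloc(dev, struct_size(lookup, table, n_gpios + 1),
			      GFP_KERNEL);
	return lookup;	/* caller fills table[0..n_gpios-1] and registers it */
}
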
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 33de99b7bc20..0818d3e50734 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -38,6 +38,7 @@
#define I2C_IO_CONFIG_OPEN_DRAIN 0x0003
#define I2C_IO_CONFIG_PUSH_PULL 0x0000
#define I2C_SOFT_RST 0x0001
+#define I2C_HANDSHAKE_RST 0x0020
#define I2C_FIFO_ADDR_CLR 0x0001
#define I2C_DELAY_LEN 0x0002
#define I2C_TIME_CLR_VALUE 0x0000
@@ -45,6 +46,7 @@
#define I2C_WRRD_TRANAC_VALUE 0x0002
#define I2C_RD_TRANAC_VALUE 0x0001
#define I2C_SCL_MIS_COMP_VALUE 0x0000
+#define I2C_CHN_CLR_FLAG 0x0000
#define I2C_DMA_CON_TX 0x0000
#define I2C_DMA_CON_RX 0x0001
@@ -54,7 +56,9 @@
#define I2C_DMA_START_EN 0x0001
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
+#define I2C_DMA_WARM_RST 0x0001
#define I2C_DMA_HARD_RST 0x0002
+#define I2C_DMA_HANDSHAKE_RST 0x0004
#define MAX_SAMPLE_CNT_DIV 8
#define MAX_STEP_CNT_DIV 64
@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
- udelay(50);
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
-
- mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ if (i2c->dev_comp->dma_sync) {
+ writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
+ i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
+ OFFSET_SOFTRESET);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
+ } else {
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(50);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ }
/* Set ioconfig */
if (i2c->use_push_pull)
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 19cda6742423..2917fecf6c80 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -72,6 +72,8 @@
/* timeout (ms) for pm runtime autosuspend */
#define SPRD_I2C_PM_TIMEOUT 1000
+/* timeout (ms) for transfer message */
+#define I2C_XFER_TIMEOUT 1000
/* SPRD i2c data structure */
struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, bool is_last_msg)
{
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ unsigned long time_left;
i2c_dev->msg = msg;
i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
sprd_i2c_opt_start(i2c_dev);
- wait_for_completion(&i2c_dev->complete);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ msecs_to_jiffies(I2C_XFER_TIMEOUT));
+ if (!time_left)
+ return -ETIMEDOUT;
return i2c_dev->err;
}
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 41dba7090c2a..c770951a909c 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -96,9 +96,10 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
return -ENODEV;
}
/* Allow scaling to be disabled on a per-node basis */
- if (!dn || !of_device_is_available(dn)) {
+ if (!of_device_is_available(dn)) {
dev_warn(dev, "Missing property %s, skip scaling %s\n",
adj->phandle_name, node->name);
+ of_node_put(dn);
return 0;
}
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index ba43a15aefec..d7768d3c6d8a 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/interconnect-provider.h>
#include <dt-bindings/interconnect/imx8mq.h>
#include "imx.h"
@@ -94,6 +95,7 @@ static struct platform_driver imx8mq_icc_driver = {
.remove = imx8mq_icc_remove,
.driver = {
.name = "imx8mq-interconnect",
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index a8f93ba265f8..b3fb5b02bcf1 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -42,13 +42,23 @@ config INTERCONNECT_QCOM_QCS404
This is a driver for the Qualcomm Network-on-Chip on qcs404-based
platforms.
+config INTERCONNECT_QCOM_RPMH_POSSIBLE
+ tristate
+ default INTERCONNECT_QCOM
+ depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
+ depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
+ depends on OF || COMPILE_TEST
+ help
+ Compile-testing RPMH drivers is possible on other platforms,
+ but in order to avoid link failures, drivers must not be built-in
+ when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules.
+
config INTERCONNECT_QCOM_RPMH
tristate
config INTERCONNECT_QCOM_SC7180
tristate "Qualcomm SC7180 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -57,8 +67,7 @@ config INTERCONNECT_QCOM_SC7180
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -67,8 +76,7 @@ config INTERCONNECT_QCOM_SDM845
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -77,8 +85,7 @@ config INTERCONNECT_QCOM_SM8150
config INTERCONNECT_QCOM_SM8250
tristate "Qualcomm SM8250 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index f54cd79b43e4..6a1f7048dacc 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1973,8 +1973,6 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
return r;
}
- iommu->int_enabled = true;
-
return 0;
}
@@ -2169,6 +2167,7 @@ static int iommu_init_irq(struct amd_iommu *iommu)
if (ret)
return ret;
+ iommu->int_enabled = true;
enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 7e2c445a1fae..f0adbc48fd17 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3854,6 +3854,9 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
struct amd_iommu *iommu;
int devid = -1;
+ if (!amd_iommu_irq_remap)
+ return 0;
+
if (x86_fwspec_is_ioapic(fwspec))
devid = get_ioapic_devid(fwspec->param[0]);
else if (x86_fwspec_is_hpet(fwspec))
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 5dff7ffbef11..1b83d140742f 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -196,6 +196,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+ arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f0305e6aac1b..4078358ed66e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -863,33 +863,6 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
int i, count = 0;
- /*
- * The Intel graphic driver is used to assume that the returned
- * sg list is not combound. This blocks the efforts of converting
- * Intel IOMMU driver to dma-iommu api's. Add this quirk to make the
- * device driver work and should be removed once it's fixed in i915
- * driver.
- */
- if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
- to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
- (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- for_each_sg(sg, s, nents, i) {
- unsigned int s_iova_off = sg_dma_address(s);
- unsigned int s_length = sg_dma_len(s);
- unsigned int s_iova_len = s->length;
-
- s->offset += s_iova_off;
- s->length = s_length;
- sg_dma_address(s) = dma_addr + s_iova_off;
- sg_dma_len(s) = s_length;
- dma_addr += s_iova_len;
-
- pr_info_once("sg combining disabled due to i915 driver\n");
- }
-
- return nents;
- }
-
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
unsigned int s_iova_off = sg_dma_address(s);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b46dbfa6d0ed..004feaed3c72 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
int mask = ilog2(__roundup_pow_of_two(npages));
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
- if (WARN_ON_ONCE(!ALIGN(addr, align)))
- addr &= ~(align - 1);
+ if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+ addr = ALIGN_DOWN(addr, align);
desc.qw0 = QI_EIOTLB_PASID(pasid) |
QI_EIOTLB_DID(did) |
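
The change above matters because ALIGN() rounds up and is practically never zero, so !ALIGN(addr, align) could not detect a misaligned address; IS_ALIGNED() tests the low bits and ALIGN_DOWN() masks them off. A standalone illustration, with the macros re-created as the kernel defines them for power-of-two alignments:

/* Illustration of why !ALIGN(addr, align) was the wrong test. */
#include <stdio.h>

#define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long addr = 0x12345, align = 0x1000;

	printf("ALIGN=%#lx (never 0 here)  IS_ALIGNED=%d  ALIGN_DOWN=%#lx\n",
	       ALIGN(addr, align), (int)IS_ALIGNED(addr, align),
	       ALIGN_DOWN(addr, align));	/* 0x13000, 0, 0x12000 */
	return 0;
}
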
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 788119c5b021..65cf06d70bf4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -719,6 +719,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
+static void domain_update_iotlb(struct dmar_domain *domain);
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
@@ -744,6 +746,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
else
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+ domain_update_iotlb(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -1464,17 +1468,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
assert_spin_locked(&device_domain_lock);
- list_for_each_entry(info, &domain->devices, link) {
- struct pci_dev *pdev;
-
- if (!info->dev || !dev_is_pci(info->dev))
- continue;
-
- pdev = to_pci_dev(info->dev);
- if (pdev->ats_enabled) {
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
+
+ if (!has_iotlb_device) {
+ struct subdev_domain_info *sinfo;
+
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ if (info && info->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
}
domain->has_iotlb_device = has_iotlb_device;
@@ -1555,25 +1564,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
#endif
}
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+ u64 addr, unsigned int mask)
+{
+ u16 sid, qdep;
+
+ if (!info || !info->ats_enabled)
+ return;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
+}
+
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- u16 sid, qdep;
unsigned long flags;
struct device_domain_info *info;
+ struct subdev_domain_info *sinfo;
if (!domain->has_iotlb_device)
return;
spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats_enabled)
- continue;
+ list_for_each_entry(info, &domain->devices, link)
+ __iommu_flush_dev_iotlb(info, addr, mask);
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ __iommu_flush_dev_iotlb(info, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -1877,6 +1898,7 @@ static struct dmar_domain *alloc_domain(int flags)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ INIT_LIST_HEAD(&domain->subdevices);
return domain;
}
@@ -2547,7 +2569,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->iommu = iommu;
info->pasid_table = NULL;
info->auxd_enabled = 0;
- INIT_LIST_HEAD(&info->auxiliary_domains);
+ INIT_LIST_HEAD(&info->subdevices);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -4475,33 +4497,61 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
domain->type == IOMMU_DOMAIN_UNMANAGED;
}
-static void auxiliary_link_device(struct dmar_domain *domain,
- struct device *dev)
+static inline struct subdev_domain_info *
+lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
+{
+ struct subdev_domain_info *sinfo;
+
+ if (!list_empty(&domain->subdevices)) {
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ if (sinfo->pdev == dev)
+ return sinfo;
+ }
+ }
+
+ return NULL;
+}
+
+static int auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
- return;
+ return -EINVAL;
+
+ if (!sinfo) {
+ sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+ sinfo->domain = domain;
+ sinfo->pdev = dev;
+ list_add(&sinfo->link_phys, &info->subdevices);
+ list_add(&sinfo->link_domain, &domain->subdevices);
+ }
- domain->auxd_refcnt++;
- list_add(&domain->auxd, &info->auxiliary_domains);
+ return ++sinfo->users;
}
-static void auxiliary_unlink_device(struct dmar_domain *domain,
- struct device *dev)
+static int auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
+ int ret;
assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info))
- return;
+ if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
+ return -EINVAL;
- list_del(&domain->auxd);
- domain->auxd_refcnt--;
+ ret = --sinfo->users;
+ if (!ret) {
+ list_del(&sinfo->link_phys);
+ list_del(&sinfo->link_domain);
+ kfree(sinfo);
+ }
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
+ return ret;
}
static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -4530,6 +4580,19 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
}
spin_lock_irqsave(&device_domain_lock, flags);
+ ret = auxiliary_link_device(domain, dev);
+ if (ret <= 0)
+ goto link_failed;
+
+ /*
+ * Subdevices from the same physical device can be attached to the
+ * same domain. For such cases, only the first subdevice attachment
+ * needs to go through the full steps in this function. So if ret >
+ * 1, just goto out.
+ */
+ if (ret > 1)
+ goto out;
+
/*
* iommu->lock must be held to attach domain to iommu and setup the
* pasid entry for second level translation.
@@ -4548,10 +4611,9 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
domain->default_pasid);
if (ret)
goto table_failed;
- spin_unlock(&iommu->lock);
-
- auxiliary_link_device(domain, dev);
+ spin_unlock(&iommu->lock);
+out:
spin_unlock_irqrestore(&device_domain_lock, flags);
return 0;
@@ -4560,8 +4622,10 @@ table_failed:
domain_detach_iommu(domain, iommu);
attach_failed:
spin_unlock(&iommu->lock);
+ auxiliary_unlink_device(domain, dev);
+link_failed:
spin_unlock_irqrestore(&device_domain_lock, flags);
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
ioasid_put(domain->default_pasid);
return ret;
@@ -4581,14 +4645,18 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
info = get_domain_info(dev);
iommu = info->iommu;
- auxiliary_unlink_device(domain, dev);
-
- spin_lock(&iommu->lock);
- intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
- domain_detach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
+ if (!auxiliary_unlink_device(domain, dev)) {
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev,
+ domain->default_pasid, false);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+ }
spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+ ioasid_put(domain->default_pasid);
}
static int prepare_domain_attach_device(struct iommu_domain *domain,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index aeffda92b10b..685200a5cff0 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1353,6 +1353,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
irq_data = irq_domain_get_irq_data(domain, virq + i);
irq_cfg = irqd_cfg(irq_data);
if (!irq_data || !irq_cfg) {
+ if (!i)
+ kfree(data);
ret = -EINVAL;
goto out_free_data;
}
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 4fa248b98031..790ef3497e7e 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -142,7 +142,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +166,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
}
}
@@ -211,7 +211,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
*/
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+ intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
svm->pasid, true);
rcu_read_unlock();
@@ -281,6 +281,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct dmar_domain *dmar_domain;
struct device_domain_info *info;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@@ -363,6 +364,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -381,12 +383,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
* each bind of a new device even with an existing PASID, we need to
* call the nested mode setup function here.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
data->hpasid, ret);
@@ -486,6 +488,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int pasid_max;
int ret;
@@ -546,6 +549,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
goto out;
}
sdev->dev = dev;
+ sdev->iommu = iommu;
ret = intel_iommu_enable_pasid(iommu, dev);
if (ret) {
@@ -575,7 +579,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
kfree(sdev);
goto out;
}
- svm->iommu = iommu;
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;
@@ -605,14 +608,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
}
}
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
@@ -632,14 +635,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
* Binding a new device with existing PASID, need to setup
* the PASID entry.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
kfree(sdev);
goto out;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 4bb3293ae4d7..d20b8b333d30 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -358,7 +358,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
* @iovad: - iova domain in question.
* @pfn: - page frame number
* This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
*/
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
@@ -601,7 +601,7 @@ void queue_iova(struct iova_domain *iovad,
EXPORT_SYMBOL_GPL(queue_iova);
/**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
* All the iova's in that domain are destroyed.
*/
@@ -712,9 +712,9 @@ EXPORT_SYMBOL_GPL(reserve_iova);
/**
* copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
+ * @from: - source domain from where to copy
* @to: - destination domin where to copy
- * This function copies reserved iova's from one doamin to
+ * This function copies reserved iova's from one domain to
* other.
*/
void
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
index 26cf0ac9c4ad..c9a53c222472 100644
--- a/drivers/isdn/mISDN/Kconfig
+++ b/drivers/isdn/mISDN/Kconfig
@@ -13,6 +13,7 @@ if MISDN != n
config MISDN_DSP
tristate "Digital Audio Processing of transparent data"
depends on MISDN
+ select BITREVERSE
help
Enable support for digital audio processing capability.
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 8f39f9ba5c80..4c2ce210c123 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -19,6 +19,7 @@ if NVM
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
+ select CRC32
help
Allows an open-channel SSD to be exposed as a block device to the
host. The target assumes the device exposes raw flash and must be
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 6469223f0b77..d636b7b2d070 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -17,7 +17,7 @@ struct feature {
};
static struct feature feature_list[] = {
- {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+ {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
"large_bucket"},
{0, 0, 0 },
};
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
index a1653c478041..84fc2c0f0101 100644
--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -13,11 +13,15 @@
/* Feature set definition */
/* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET 0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE 0x0002
-#define BCH_FEATURE_COMPAT_SUUP 0
-#define BCH_FEATURE_RO_COMPAT_SUUP 0
-#define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP 0
+#define BCH_FEATURE_RO_COMPAT_SUPP 0
+#define BCH_FEATURE_INCOMPAT_SUPP (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+ BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
((sb)->feature_compat & (mask))
@@ -77,7 +81,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
~BCH##_FEATURE_INCOMPAT_##flagname; \
}
-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
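
With the definitions above, the on-disk bucket_size carries log2 of the real bucket size when LOG_LARGE_BUCKET_SIZE is set, and any feature bit outside the *_SUPP masks must make the superblock unreadable. A small standalone sketch of both checks; the masks are copied from the hunk, the sample values are invented:

/* Illustration of the log-encoded bucket size and the unknown-feature
 * check introduced above. */
#include <stdint.h>
#include <stdio.h>

#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET       0x0001
#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE   0x0002
#define BCH_FEATURE_INCOMPAT_SUPP \
	(BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET | \
	 BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)

int main(void)
{
	uint64_t feature_incompat = 0x0002;	/* example superblock field */
	uint16_t on_disk_bucket_size = 13;	/* log2-encoded: decodes to 1 << 13 */

	if (feature_incompat & ~(uint64_t)BCH_FEATURE_INCOMPAT_SUPP) {
		puts("Unsupported incompatible feature found");
		return 1;
	}
	printf("bucket size = %u\n", 1u << on_disk_bucket_size);
	return 0;
}
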
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a4752ac410dc..2047a9cccdb5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
{
unsigned int bucket_size = le16_to_cpu(s->bucket_size);
- if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
- bch_has_feature_large_bucket(sb))
- bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ if (bch_has_feature_large_bucket(sb)) {
+ unsigned int max, order;
+
+ max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+ order = le16_to_cpu(s->bucket_size);
+ /*
+ * The bcache tools ensure this overflow cannot
+ * happen; an error message here is enough.
+ */
+ if (order > max)
+ pr_err("Bucket size (1 << %u) overflows\n",
+ order);
+ bucket_size = 1 << order;
+ } else if (bch_has_feature_obso_large_bucket(sb)) {
+ bucket_size +=
+ le16_to_cpu(s->obso_bucket_size_hi) << 16;
+ }
+ }
return bucket_size;
}
@@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->feature_compat = le64_to_cpu(s->feature_compat);
sb->feature_incompat = le64_to_cpu(s->feature_incompat);
sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+ /* Check incompatible features */
+ err = "Unsupported compatible feature found";
+ if (bch_has_unknown_compat_features(sb))
+ goto err;
+
+ err = "Unsupported read-only compatible feature found";
+ if (bch_has_unknown_ro_compat_features(sb))
+ goto err;
+
+ err = "Unsupported incompatible feature found";
+ if (bch_has_unknown_incompat_features(sb))
+ goto err;
+
err = read_super_common(sb, bdev, s);
if (err)
goto err;
@@ -1302,6 +1332,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
+ if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(dc->disk.disk, 1);
+ }
+
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
@@ -1524,6 +1560,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_link(d, c, "volume");
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(d->disk, 1);
+ }
+
return 0;
err:
kobject_put(&d->kobj);
@@ -2083,6 +2125,9 @@ static int run_cache_set(struct cache_set *c)
c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+ pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
+
list_for_each_entry_safe(dc, t, &uncached_devices, list)
bch_cached_dev_attach(dc, c, NULL);
@@ -2644,8 +2689,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
}
list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
- char *pdev_set_uuid = pdev->dc->sb.set_uuid;
char *set_uuid = c->set_uuid;
if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index beb482310a58..b2b3d2b0f808 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -472,8 +472,11 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cntr = &hdev->aggregated_cs_counters;
cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
- if (!cs)
+ if (!cs) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
return -ENOMEM;
+ }
cs->ctx = ctx;
cs->submitted = false;
@@ -486,6 +489,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_cs;
}
@@ -513,6 +518,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
if (!cs->jobs_in_queue_cnt) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_fence;
}
@@ -562,7 +569,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
- /* Make sure we don't have leftovers in the H/W queues mirror list */
+ /* Make sure we don't have leftovers in the CS mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
cs_get(cs);
cs->aborted = true;
@@ -764,11 +771,14 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
static int hl_cs_copy_chunk_array(struct hl_device *hdev,
struct hl_cs_chunk **cs_chunk_array,
- void __user *chunks, u32 num_chunks)
+ void __user *chunks, u32 num_chunks,
+ struct hl_ctx *ctx)
{
u32 size_to_copy;
if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Number of chunks can NOT be larger than %d\n",
HL_MAX_JOBS_PER_CS);
@@ -777,11 +787,16 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
GFP_ATOMIC);
- if (!*cs_chunk_array)
+ if (!*cs_chunk_array) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
kfree(*cs_chunk_array);
return -EFAULT;
@@ -797,6 +812,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
+ struct hl_ctx *ctx = hpriv->ctx;
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
@@ -805,7 +821,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ hpriv->ctx);
if (rc)
goto out;
@@ -832,8 +849,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
goto free_cs_object;
}
@@ -841,8 +858,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
atomic64_inc(
- &hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ &ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
goto free_cs_object;
}
@@ -856,8 +873,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
- atomic64_inc(
- &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -891,7 +907,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = cs_parser(hpriv, job);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
@@ -901,8 +917,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (int_queues_only) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
@@ -1042,7 +1058,7 @@ out:
}
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
- struct hl_cs_chunk *chunk, u64 *signal_seq)
+ struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
@@ -1052,6 +1068,8 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
/* currently only one signal seq is supported */
if (signal_seq_arr_len != 1) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Wait for signal CS supports only one signal CS seq\n");
return -EINVAL;
@@ -1060,13 +1078,18 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
signal_seq_arr = kmalloc_array(signal_seq_arr_len,
sizeof(*signal_seq_arr),
GFP_ATOMIC);
- if (!signal_seq_arr)
+ if (!signal_seq_arr) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Failed to copy signal seq array from user\n");
rc = -EFAULT;
@@ -1153,6 +1176,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
u32 q_idx, collective_engine_id = 0;
+ struct hl_cs_counters_atomic *cntr;
struct hl_fence *sig_fence = NULL;
struct hl_ctx *ctx = hpriv->ctx;
enum hl_queue_type q_type;
@@ -1160,9 +1184,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
u64 signal_seq;
int rc;
+ cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ ctx);
if (rc)
goto out;
@@ -1170,6 +1196,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
chunk = &cs_chunk_array[0];
if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
rc = -EINVAL;
@@ -1181,6 +1209,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
q_type = hw_queue_prop->type;
if (!hw_queue_prop->supports_sync_stream) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d does not support sync stream operations\n",
q_idx);
@@ -1190,6 +1220,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
@@ -1200,12 +1232,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
- rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+ rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
if (rc)
goto free_cs_chunk_array;
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Failed to get signal CS with seq 0x%llx\n",
signal_seq);
@@ -1223,6 +1257,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
container_of(sig_fence, struct hl_cs_compl, base_fence);
if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"CS seq 0x%llx is not of a signal CS\n",
signal_seq);
@@ -1270,8 +1306,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
cs, q_idx, collective_engine_id);
- else
+ else {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
+ }
if (rc)
goto free_cs_object;
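For reference, every early-exit path in the submission code above follows one bookkeeping pattern: the same drop statistic is bumped twice, once on the submitting context and once on the device-wide aggregate. A minimal standalone C model of that pattern, using illustrative struct and helper names rather than the driver's own, looks like this:

#include <stdatomic.h>
#include <stdio.h>

/* Simplified model of the per-context and aggregated drop counters. */
struct drop_counters {
        atomic_llong out_of_mem_drop_cnt;
        atomic_llong parsing_drop_cnt;
        atomic_llong validation_drop_cnt;
};

struct submit_ctx   { struct drop_counters counters; };
struct device_model { struct drop_counters aggregated; };

/* A validation failure is counted in both views, mirroring the diff above. */
static void count_validation_drop(struct device_model *dev, struct submit_ctx *ctx)
{
        atomic_fetch_add(&ctx->counters.validation_drop_cnt, 1);
        atomic_fetch_add(&dev->aggregated.validation_drop_cnt, 1);
}

int main(void)
{
        struct device_model dev = {0};
        struct submit_ctx ctx = {0};

        count_validation_drop(&dev, &ctx);
        printf("ctx=%lld aggregated=%lld\n",
               (long long)atomic_load(&ctx.counters.validation_drop_cnt),
               (long long)atomic_load(&dev.aggregated.validation_drop_cnt));
        return 0;
}

Folding the two increments into one helper like this would keep the per-context and aggregate views from drifting apart; the driver instead repeats the pair inline at each exit point.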
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 5871162a8442..1456eabf9601 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -17,12 +17,12 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (hdev->disabled)
- status = HL_DEVICE_STATUS_MALFUNCTION;
- else if (atomic_read(&hdev->in_reset))
+ if (atomic_read(&hdev->in_reset))
status = HL_DEVICE_STATUS_IN_RESET;
else if (hdev->needs_reset)
status = HL_DEVICE_STATUS_NEEDS_RESET;
+ else if (hdev->disabled)
+ status = HL_DEVICE_STATUS_MALFUNCTION;
else
status = HL_DEVICE_STATUS_OPERATIONAL;
@@ -1092,6 +1092,7 @@ kill_processes:
GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
+ hl_mmu_fini(hdev);
goto out_err;
}
@@ -1103,6 +1104,7 @@ kill_processes:
"failed to init kernel ctx in hard reset\n");
kfree(hdev->kernel_ctx);
hdev->kernel_ctx = NULL;
+ hl_mmu_fini(hdev);
goto out_err;
}
}
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 0e1c629e9800..20f77f58edef 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -627,25 +627,38 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
security_status = RREG32(cpu_security_boot_status_reg);
/* We read security status multiple times during boot:
- * 1. preboot - we check if fw security feature is supported
- * 2. boot cpu - we get boot cpu security status
- * 3. FW application - we get FW application security status
+ * 1. preboot - a. Check whether the security status bits are valid
+ * b. Check whether fw security is enabled
+ * c. Check whether hard reset is done by preboot
+ * 2. boot cpu - a. Fetch boot cpu security status
+ * b. Check whether hard reset is done by boot cpu
+ * 3. FW application - a. Fetch fw application security status
+ * b. Check whether hard reset is done by fw app
*
* Preboot:
* Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
* check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
*/
if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
- hdev->asic_prop.fw_security_status_valid = 1;
- prop->fw_security_disabled =
- !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN);
+ prop->fw_security_status_valid = 1;
+
+ if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
+ prop->fw_security_disabled = false;
+ else
+ prop->fw_security_disabled = true;
+
+ if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
} else {
- hdev->asic_prop.fw_security_status_valid = 0;
+ prop->fw_security_status_valid = 0;
prop->fw_security_disabled = true;
}
+ dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
dev_info(hdev->dev, "firmware-level security is %s\n",
- prop->fw_security_disabled ? "disabled" : "enabled");
+ prop->fw_security_disabled ? "disabled" : "enabled");
return 0;
}
@@ -655,6 +668,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 status;
int rc;
@@ -723,11 +737,22 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
/* Read U-Boot version now in case we will later fail */
hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
+ /* Clear reset status since we need to read it again from boot CPU */
+ prop->hard_reset_done_by_fw = false;
+
/* Read boot_cpu security bits */
- if (hdev->asic_prop.fw_security_status_valid)
- hdev->asic_prop.fw_boot_cpu_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_boot_cpu_security_map =
RREG32(cpu_security_boot_status_reg);
+ if (prop->fw_boot_cpu_security_map &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
+ }
+
+ dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
if (rc) {
detect_cpu_boot_status(hdev, status);
rc = -EIO;
@@ -796,18 +821,21 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
+ /* Clear reset status since we need to read it again from app */
+ prop->hard_reset_done_by_fw = false;
+
/* Read FW application security bits */
- if (hdev->asic_prop.fw_security_status_valid) {
- hdev->asic_prop.fw_app_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_app_security_map =
RREG32(cpu_security_boot_status_reg);
- if (hdev->asic_prop.fw_app_security_map &
+ if (prop->fw_app_security_map &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- hdev->asic_prop.hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = true;
}
- dev_dbg(hdev->dev, "Firmware hard-reset is %s\n",
- hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled");
+ dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
dev_info(hdev->dev, "Successfully loaded firmware to device\n");
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 571eda6ef5ab..e0d7f5fbaa5c 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -944,7 +944,7 @@ struct hl_asic_funcs {
u32 (*get_signal_cb_size)(struct hl_device *hdev);
u32 (*get_wait_cb_size)(struct hl_device *hdev);
u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
u32 (*gen_wait_cb)(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
void (*reset_sob)(struct hl_device *hdev, void *data);
@@ -1000,6 +1000,7 @@ struct hl_va_range {
* @queue_full_drop_cnt: dropped due to queue full
* @device_in_reset_drop_cnt: dropped due to device in reset
* @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ * @validation_drop_cnt: dropped due to error in validation
*/
struct hl_cs_counters_atomic {
atomic64_t out_of_mem_drop_cnt;
@@ -1007,6 +1008,7 @@ struct hl_cs_counters_atomic {
atomic64_t queue_full_drop_cnt;
atomic64_t device_in_reset_drop_cnt;
atomic64_t max_cs_in_flight_drop_cnt;
+ atomic64_t validation_drop_cnt;
};
/**
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 6bbb6bca6860..032d114f01ea 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -544,6 +544,7 @@ static struct pci_driver hl_pci_driver = {
.id_table = ids,
.probe = hl_pci_probe,
.remove = hl_pci_remove,
+ .shutdown = hl_pci_remove,
.driver.pm = &hl_pm_ops,
.err_handler = &hl_pci_err_handler,
};
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 32e6af1db4e3..12efbd9d2e3a 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -335,6 +335,8 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
atomic64_read(&cntr->device_in_reset_drop_cnt);
cs_counters.total_max_cs_in_flight_drop_cnt =
atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
+ cs_counters.total_validation_drop_cnt =
+ atomic64_read(&cntr->validation_drop_cnt);
if (hpriv->ctx) {
cs_counters.ctx_out_of_mem_drop_cnt =
@@ -352,6 +354,9 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
cs_counters.ctx_max_cs_in_flight_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
+ cs_counters.ctx_validation_drop_cnt =
+ atomic64_read(
+ &hpriv->ctx->cs_counters.validation_drop_cnt);
}
return copy_to_user(out, &cs_counters,
@@ -406,7 +411,7 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv,
static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
- struct hl_pll_frequency_info freq_info = {0};
+ struct hl_pll_frequency_info freq_info = { {0} };
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 7caf868d1585..76217258780a 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -418,8 +418,11 @@ static void init_signal_cs(struct hl_device *hdev,
"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+ /* we set an EB since we must make sure all operations are done
+ * when sending the signal
+ */
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
- cs_cmpl->hw_sob->sob_id, 0);
+ cs_cmpl->hw_sob->sob_id, 0, true);
kref_get(&hw_sob->kref);
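The new eb flag threaded through gen_signal_cb() decides whether the event-barrier bit is set in the generated MSG_SHORT control word: true on the signal path above, where prior operations must be observed before the signal fires, and false for the collective-slave CBs seen later in gaudi.c. A toy sketch of that control-word assembly, with placeholder mask values rather than the real GAUDI ones:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions; the real driver builds these with FIELD_PREP
 * and the GAUDI_PKT_SHORT_CTL_* masks.
 */
#define CTL_EB_BIT (1u << 29)
#define CTL_RB_BIT (1u << 30)
#define CTL_MB_BIT (1u << 31)

static uint32_t build_signal_ctl(bool eb)
{
        uint32_t ctl = CTL_RB_BIT | CTL_MB_BIT;

        if (eb)                        /* signal path: order prior writes */
                ctl |= CTL_EB_BIT;     /* collective slaves pass eb=false */
        return ctl;
}

int main(void)
{
        printf("signal: 0x%08x  collective: 0x%08x\n",
               (unsigned)build_signal_ctl(true),
               (unsigned)build_signal_ctl(false));
        return 0;
}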
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c
index 923b2606e29f..b4725e6101f6 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -130,10 +130,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
return 0;
- if (val & PCI_CONFIG_ELBI_STS_ERR) {
- dev_err(hdev->dev, "Error writing to ELBI\n");
+ if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
- }
if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
dev_err(hdev->dev, "ELBI write didn't finish in time\n");
@@ -160,8 +158,12 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
dbi_offset = addr & 0xFFF;
- rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+ /* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+ * in case the firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+
+ rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
data);
if (rc)
@@ -244,9 +246,11 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location
+ * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+ * in case the firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
if (rc)
dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
@@ -294,9 +298,11 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
/* Enable */
rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location
+ * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+ * in case the firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
return rc;
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 1f1926607c5e..8c09e4466af8 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -151,19 +151,6 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
[PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
};
-static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = {
- [CPU_PLL] = mmPSOC_CPU_PLL_NR,
- [PCI_PLL] = mmPSOC_PCI_PLL_NR,
- [SRAM_PLL] = mmSRAM_W_PLL_NR,
- [HBM_PLL] = mmPSOC_HBM_PLL_NR,
- [NIC_PLL] = mmNIC0_PLL_NR,
- [DMA_PLL] = mmDMA_W_PLL_NR,
- [MESH_PLL] = mmMESH_W_PLL_NR,
- [MME_PLL] = mmPSOC_MME_PLL_NR,
- [TPC_PLL] = mmPSOC_TPC_PLL_NR,
- [IF_PLL] = mmIF_W_PLL_NR
-};
-
static inline bool validate_packet_id(enum packet_id id)
{
switch (id) {
@@ -374,7 +361,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
@@ -667,12 +654,6 @@ static int gaudi_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -685,6 +666,12 @@ static int gaudi_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
return 0;
pci_fini:
@@ -703,93 +690,60 @@ static int gaudi_early_fini(struct hl_device *hdev)
}
/**
- * gaudi_fetch_pll_frequency - Fetch PLL frequency values
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
*
* @hdev: pointer to hl_device structure
- * @pll_index: index of the pll to fetch frequency from
- * @pll_freq: pointer to store the pll frequency in MHz in each of the available
- * outputs. if a certain output is not available a 0 will be set
*
*/
-static int gaudi_fetch_pll_frequency(struct hl_device *hdev,
- enum gaudi_pll_index pll_index,
- u16 *pll_freq_arr)
+static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
- u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel,
- pll_base_addr = gaudi_pll_base_addresses[pll_index];
- u16 freq = 0;
- int i, rc;
-
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_app_security_map &
- CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
- rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr);
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
- if (rc)
- return rc;
- } else if (hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_disabled) {
/* Backward compatibility */
- nr = RREG32(pll_base_addr + PLL_NR_OFFSET);
- nf = RREG32(pll_base_addr + PLL_NF_OFFSET);
- od = RREG32(pll_base_addr + PLL_OD_OFFSET);
-
- for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) {
- div_fctr = RREG32(pll_base_addr +
- PLL_DIV_FACTOR_0_OFFSET + i * 4);
- div_sel = RREG32(pll_base_addr +
- PLL_DIV_SEL_0_OFFSET + i * 4);
+ div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+ div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+ nr = RREG32(mmPSOC_CPU_PLL_NR);
+ nf = RREG32(mmPSOC_CPU_PLL_NF);
+ od = RREG32(mmPSOC_CPU_PLL_OD);
- if (div_sel == DIV_SEL_REF_CLK ||
+ if (div_sel == DIV_SEL_REF_CLK ||
div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- freq = PLL_REF_CLK;
- else
- freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) /
- ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- freq = pll_clk;
- else
- freq = pll_clk / (div_fctr + 1);
- } else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d",
- div_sel);
- }
-
- pll_freq_arr[i] = freq;
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
}
} else {
- dev_err(hdev->dev, "Failed to fetch PLL frequency values\n");
- return -EIO;
- }
+ rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
- return 0;
-}
-
-/**
- * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u16 pll_freq[HL_PLL_NUM_OUTPUTS];
- int rc;
+ if (rc)
+ return rc;
- rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq);
- if (rc)
- return rc;
+ freq = pll_freq_arr[2];
+ }
- prop->psoc_timestamp_frequency = pll_freq[2];
- prop->psoc_pci_pll_nr = 0;
- prop->psoc_pci_pll_nf = 0;
- prop->psoc_pci_pll_od = 0;
- prop->psoc_pci_pll_div_factor = 0;
+ prop->psoc_timestamp_frequency = freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
return 0;
}
@@ -884,11 +838,17 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
size_t fw_size;
void *cpu_addr;
dma_addr_t dma_handle;
- int rc;
+ int rc, count = 5;
+again:
rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+ if (rc == -EINTR && count-- > 0) {
+ msleep(50);
+ goto again;
+ }
+
if (rc) {
- dev_err(hdev->dev, "Firmware file %s is not found!\n",
+ dev_err(hdev->dev, "Failed to load firmware file %s\n",
GAUDI_TPC_FW_FILE);
goto out;
}
@@ -1110,7 +1070,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
prop->collective_sob_id, queue_id);
cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
- prop->collective_sob_id, cb_size);
+ prop->collective_sob_id, cb_size, false);
}
static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
@@ -2449,8 +2409,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_e2e(hdev);
gaudi_init_hbm_cred(hdev);
- hdev->asic_funcs->disable_clock_gating(hdev);
-
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
@@ -3462,6 +3420,9 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
if (hdev->in_debug)
return;
+ if (!hdev->asic_prop.fw_security_disabled)
+ return;
+
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
enable = !!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i])));
@@ -3513,7 +3474,7 @@ static void gaudi_disable_clock_gating(struct hl_device *hdev)
u32 qman_offset;
int i;
- if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+ if (!hdev->asic_prop.fw_security_disabled)
return;
for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
@@ -3806,7 +3767,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
static void gaudi_pre_hw_init(struct hl_device *hdev)
{
/* Perform read from the device to make sure device is up */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
if (hdev->asic_prop.fw_security_disabled) {
/* Set the access through PCI bars (Linux driver only) as
@@ -3847,6 +3808,13 @@ static int gaudi_hw_init(struct hl_device *hdev)
return rc;
}
+ /* In case the clock gating was enabled in preboot we need to disable
+ * it here before touching the MME/TPC registers.
+ * There is no need to take clk gating mutex because when this function
+ * runs, no other relevant code can run
+ */
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
/* SRAM scrambler must be initialized after CPU is running from HBM */
gaudi_init_scrambler_sram(hdev);
@@ -3885,7 +3853,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return 0;
@@ -3927,7 +3895,10 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
/* I don't know what the state of the CPU is, so make sure it is
* stopped by any means necessary
*/
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_RST_DEV);
+ else
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
@@ -3971,11 +3942,15 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- }
- dev_info(hdev->dev,
- "Issued HARD reset command, going to wait %dms\n",
- reset_timeout_ms);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ dev_info(hdev->dev,
+ "Firmware performs HARD reset, going to wait %dms\n",
+ reset_timeout_ms);
+ }
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -7936,7 +7911,7 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
}
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
struct hl_cb *cb = (struct hl_cb *) data;
struct packet_msg_short *pkt;
@@ -7953,7 +7928,7 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, eb);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
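The retry loop added around request_firmware() in gaudi_init_tpc_mem() is a plain bounded retry on -EINTR with a short sleep between attempts. A standalone sketch of the same pattern, with load_blob() standing in for the real firmware request:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the firmware request; here it always reports interruption. */
static int load_blob(const char *name)
{
        (void)name;
        return -EINTR;
}

/* Bounded retry: one initial attempt plus up to five retries, sleeping
 * roughly 50 ms between attempts, like the count/msleep(50) loop above.
 */
static int load_with_retry(const char *name)
{
        int rc, count = 5;

        do {
                rc = load_blob(name);
                if (rc != -EINTR)
                        break;
                usleep(50 * 1000);
        } while (count-- > 0);

        return rc;
}

int main(void)
{
        printf("rc=%d\n", load_with_retry("tpc.bin"));
        return 0;
}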
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index f2d91f4fcffe..a7ab2d7e57d4 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -105,13 +105,6 @@
#define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
#define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
-#define PLL_NR_OFFSET 0
-#define PLL_NF_OFFSET (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR)
-#define PLL_OD_OFFSET (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_FACTOR_0_OFFSET (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \
- mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_SEL_0_OFFSET (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR)
-
#define NUM_OF_SOB_IN_BLOCK \
(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 2e3612e1ee28..88a09d42e111 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -9,6 +9,7 @@
#include "../include/gaudi/gaudi_coresight.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_reg_map.h"
#include <uapi/misc/habanalabs.h>
#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
@@ -874,7 +875,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return rc;
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 3e5eb9e3d7bd..b8b4aa636b7c 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -613,12 +613,6 @@ static int goya_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -631,6 +625,12 @@ static int goya_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
@@ -694,32 +694,47 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 trace_freq = 0;
- u32 pll_clk = 0;
- u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
- u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
- u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
- u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
- u32 od = RREG32(mmPSOC_PCI_PLL_OD);
-
- if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- trace_freq = PLL_REF_CLK;
- else
- trace_freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- trace_freq = pll_clk;
- else
- trace_freq = pll_clk / (div_fctr + 1);
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
+
+ if (hdev->asic_prop.fw_security_disabled) {
+ div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+ nr = RREG32(mmPSOC_PCI_PLL_NR);
+ nf = RREG32(mmPSOC_PCI_PLL_NF);
+ od = RREG32(mmPSOC_PCI_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK ||
+ div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
+ }
} else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d", div_sel);
+ rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+
+ if (rc)
+ return;
+
+ freq = pll_freq_arr[1];
}
- prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_timestamp_frequency = freq;
prop->psoc_pci_pll_nr = nr;
prop->psoc_pci_pll_nf = nf;
prop->psoc_pci_pll_od = od;
@@ -5324,7 +5339,7 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev)
}
static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
return 0;
}
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index e5801ecf0cb2..b637dfd69f6e 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -145,11 +145,15 @@
* implemented. This means that FW will
* perform hard reset procedure on
* receiving the halt-machine event.
- * Initialized in: linux
+ * Initialized in: preboot, u-boot, linux
*
* CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -171,6 +175,7 @@
#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9)
#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10)
#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -204,6 +209,8 @@ enum kmd_msg {
KMD_MSG_GOTO_WFE,
KMD_MSG_FIT_RDY,
KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
};
enum cpu_msg_status {
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 951b37da5e3c..41cab297d66e 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -55,12 +55,23 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_mem_or_io(pdev, 0);
- if (res && resource_type(res) == IORESOURCE_IO)
+ if (!res)
+ return -EINVAL;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
base = devm_ioport_map(dev, res->start, resource_size(res));
- else
+ if (!base)
+ return -ENOMEM;
+ break;
+ case IORESOURCE_MEM:
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ break;
+ default:
+ return -EINVAL;
+ }
atomic_notifier_chain_register(&panic_notifier_list,
&pvpanic_panic_nb);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 85ebd2b7e446..85de5f96c02b 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -380,7 +380,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
goto free_dst;
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
- BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
@@ -534,6 +534,7 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
@@ -644,11 +645,20 @@ static int bareudp_link_config(struct net_device *dev,
return 0;
}
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ list_del(&bareudp->next);
+ unregister_netdevice_queue(dev, head);
+}
+
static int bareudp_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct bareudp_conf conf;
+ LIST_HEAD(list_kill);
int err;
err = bareudp2info(data, &conf, extack);
@@ -661,17 +671,14 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
err = bareudp_link_config(dev, tb);
if (err)
- return err;
+ goto err_unconfig;
return 0;
-}
-
-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
-{
- struct bareudp_dev *bareudp = netdev_priv(dev);
- list_del(&bareudp->next);
- unregister_netdevice_queue(dev, head);
+err_unconfig:
+ bareudp_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return err;
}
static size_t bareudp_get_size(const struct net_device *dev)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 424970939fd4..1c28eade6bec 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
+ select CRC32
help
This is a driver for the Kvaser PCI Express CAN FD family.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2c9f12401276..da551fd0f502 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1852,8 +1852,6 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
unregister_candev(cdev->net);
-
- m_can_clk_stop(cdev);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index 24c737c4fc44..970f0e9d19bf 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -131,30 +131,6 @@ static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
}
-static struct can_bittiming_const tcan4x5x_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 2,
- .tseg1_max = 31,
- .tseg2_min = 2,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 1,
- .tseg1_max = 32,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
{
int wake_state = 0;
@@ -469,8 +445,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
mcan_class->is_peripheral = true;
- mcan_class->bit_timing = &tcan4x5x_bittiming_const;
- mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
mcan_class->net->irq = spi->irq;
spi_set_drvdata(spi, priv);
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
index 8d36101b78e3..29cabc20109e 100644
--- a/drivers/net/can/rcar/Kconfig
+++ b/drivers/net/can/rcar/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
- tristate "Renesas R-Car CAN controller"
+ tristate "Renesas R-Car and RZ/G CAN controller"
depends on ARCH_RENESAS || ARM
help
Say Y here if you want to use CAN controller found on Renesas R-Car
- SoCs.
+ or RZ/G SoCs.
To compile this driver as a module, choose M here: the module will
be called rcar_can.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 77129d5f410b..36235afb0bc6 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1368,13 +1368,10 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct spi_transfer *last_xfer;
- tx_ring->tail += len;
-
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
- */
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1391,6 +1388,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
if (err)
return err;
+ tx_ring->tail += len;
+
err = mcp251xfd_check_tef_tail(priv);
if (err)
return err;
@@ -1553,10 +1552,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
/* Increment the RX FIFO tail pointer 'len' times in a
* single SPI message.
- */
- ring->tail += len;
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1572,6 +1569,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
last_xfer->cs_change = 1;
if (err)
return err;
+
+ ring->tail += len;
}
return 0;
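The mcp251xfd change above is an ordering fix: the software tail pointer is now advanced only after the SPI message that bumps the hardware FIFO pointer has succeeded, so a failed transfer cannot leave the two views out of step. A standalone sketch of that commit-after-success pattern, with send_increment_cmd() standing in for the real SPI transfer:

#include <stdio.h>

struct ring { unsigned int tail; };

/* Stand-in for the SPI message that increments the hardware FIFO pointer. */
static int send_increment_cmd(unsigned int len)
{
        (void)len;
        return 0;       /* pretend the transfer succeeded */
}

static int handle_fifo(struct ring *r, unsigned int len)
{
        int err = send_increment_cmd(len);

        if (err)
                return err;     /* tail untouched on failure */

        r->tail += len;         /* commit only after success */
        return 0;
}

int main(void)
{
        struct ring r = { .tail = 0 };

        handle_fifo(&r, 4);
        printf("tail=%u\n", r.tail);
        return 0;
}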
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
index 222dd35e2c9d..e01191107a4b 100644
--- a/drivers/net/dsa/hirschmann/Kconfig
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -4,6 +4,7 @@ config NET_DSA_HIRSCHMANN_HELLCREEK
depends on HAS_IOMEM
depends on NET_DSA
depends on PTP_1588_CLOCK
+ depends on LEDS_CLASS
select NET_DSA_TAG_HELLCREEK
help
This driver adds support for Hirschmann Hellcreek TSN switches.
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 09701c17f3f6..662e68a0e7e6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -92,9 +92,7 @@
GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
-#define GSWIP_MII_CFG0 0x00
-#define GSWIP_MII_CFG1 0x02
-#define GSWIP_MII_CFG5 0x04
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
#define GSWIP_MII_CFG_EN BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
@@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
- break;
- }
+ /* There's no MII_CFG register for the CPU port */
+ if (!dsa_is_cpu_port(priv->ds, port))
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@@ -822,9 +812,8 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
/* Disable the xMII link */
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1447,11 +1436,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
+ /* With the exclusion of MII, Reverse MII and Reduced MII, we
+ * support Gigabit, including Half duplex
*/
if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_RMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseT_Half);
}
@@ -1541,9 +1531,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
- /* Enable the xMII interface only for the external PHY */
- if (interface != PHY_INTERFACE_MODE_INTERNAL)
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+ gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
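The gswip rework above replaces three hard-coded MII_CFG register defines with a per-port macro and skips ports that have no such register (the CPU port). A small sketch of that addressing scheme, in which the CPU port number and the register-access call are purely illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Each port's MII_CFG register sits 0x2 apart, as in the macro above. */
#define MII_CFGp(p)  (0x2 * (p))

static bool is_cpu_port(int port)
{
        return port == 6;       /* placeholder CPU port number */
}

static void mii_cfg_mask(int port, unsigned int clear, unsigned int set)
{
        if (is_cpu_port(port))
                return;         /* no MII_CFG register for the CPU port */
        printf("rmw reg 0x%02x: clear 0x%x set 0x%x\n",
               MII_CFGp(port), clear, set);
}

int main(void)
{
        for (int port = 0; port < 7; port++)
                mii_cfg_mask(port, 0x4000, 0);
        return 0;
}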
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index efb33c078a3c..cec2018c84a9 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -19,7 +19,6 @@ if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI
- depends on X86_64 || ARM64 || COMPILE_TEST
depends on MACSEC || MACSEC=n
help
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0fdd19d99d99..b1ae9eb8f247 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2577,6 +2577,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4edd6f8e017e..d10e4f85dd11 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6790,8 +6790,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + 1;
+ tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
if (!ctx_pg) {
kfree(ctx);
@@ -6925,7 +6927,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ i < BNXT_MAX_TQM_RINGS;
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
@@ -12887,10 +12890,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err = 0, off;
- pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12919,22 +12922,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
- if (!err) {
- err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
- }
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- }
- }
-
- if (result != PCI_ERS_RESULT_RECOVERED) {
- if (netif_running(netdev))
- dev_close(netdev);
- pci_disable_device(pdev);
}
rtnl_unlock();
@@ -12952,10 +12941,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err;
+ netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
- netif_device_attach(netdev);
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+
+ bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ netif_device_attach(netdev);
+ }
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 950ea26ae0d2..51996c85547e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1436,6 +1436,11 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
+#define BNXT_MAX_TQM_SP_RINGS 1
+#define BNXT_MAX_TQM_FP_RINGS 8
+#define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1474,7 +1479,7 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
+ struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
};
struct bnxt_fw_health {
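The bnxt change above bounds a firmware-reported ring count before it is used to size and index tqm_mem[]. A standalone sketch of that clamp, using the same constants as the header hunk but illustrative function names:

#include <stdio.h>

#define MAX_TQM_SP_RINGS 1
#define MAX_TQM_FP_RINGS 8
#define MAX_TQM_RINGS    (MAX_TQM_SP_RINGS + MAX_TQM_FP_RINGS)

/* Clamp the firmware-reported fast-path ring count so a tqm_mem[]-style
 * array of MAX_TQM_RINGS entries can never be overrun.
 */
static unsigned int clamp_fp_rings(unsigned int reported, unsigned int fallback)
{
        if (!reported)
                reported = fallback;            /* old firmware: use max_q */
        else if (reported > MAX_TQM_FP_RINGS)
                reported = MAX_TQM_FP_RINGS;    /* never exceed the bound */
        return reported;
}

int main(void)
{
        printf("%u %u %u\n", clamp_fp_rings(0, 6), clamp_fp_rings(3, 6),
               clamp_fp_rings(12, 6));
        return 0;
}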
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d5d910916c2e..814a5b10141d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -467,7 +467,7 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
{
long ferr, rate, rate_rounded;
- if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
+ if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
return;
switch (speed) {
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index a0e0d8a83681..51dd030b3b36 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -621,7 +621,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
while (!skb_queue_empty(&listen_ctx->synq)) {
struct chtls_sock *csk =
- container_of((struct synq *)__skb_dequeue
+ container_of((struct synq *)skb_peek
(&listen_ctx->synq), struct chtls_sock, synq);
struct sock *child = csk->sk;
@@ -1109,6 +1109,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
struct neighbour *n = NULL;
struct inet_sock *newinet;
const struct iphdr *iph;
@@ -1118,9 +1119,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct dst_entry *dst;
struct tcp_sock *tp;
struct sock *newsk;
+ bool found = false;
u16 port_id;
int rxq_idx;
- int step;
+ int step, i;
iph = (const struct iphdr *)network_hdr;
newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1152,7 +1154,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
}
- if (!n)
+ if (!n || !n->dev)
goto free_sk;
ndev = n->dev;
@@ -1161,6 +1163,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
+ for_each_port(adap, i)
+ if (cdev->ports[i] == ndev)
+ found = true;
+
+ if (!found)
+ goto free_dst;
+
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@@ -1238,6 +1247,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
+ neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
@@ -1387,7 +1397,7 @@ static void chtls_pass_accept_request(struct sock *sk,
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
- goto free_oreq;
+ goto reject;
if (chtls_get_module(newsk))
goto reject;
@@ -1403,8 +1413,6 @@ static void chtls_pass_accept_request(struct sock *sk,
kfree_skb(skb);
return;
-free_oreq:
- chtls_reqsk_free(oreq);
reject:
mk_tid_release(reply_skb, 0, tid);
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1589,6 +1597,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
sk_wake_async(sk, 0, POLL_OUT);
data = lookup_stid(cdev->tids, stid);
+ if (!data) {
+ /* listening server close */
+ kfree_skb(skb);
+ goto unlock;
+ }
lsk = ((struct listen_ctx *)data)->lsk;
bh_lock_sock(lsk);
@@ -1997,39 +2010,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
spin_unlock_bh(&cdev->deferq.lock);
}
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
- struct chtls_dev *cdev, int status, int queue)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct sk_buff *reply_skb;
- struct chtls_sock *csk;
-
- csk = rcu_dereference_sk_user_data(sk);
-
- reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
- GFP_KERNEL);
-
- if (!reply_skb) {
- req->status = (queue << 1);
- t4_defer_reply(skb, cdev, send_defer_abort_rpl);
- return;
- }
-
- set_abort_rpl_wr(reply_skb, GET_TID(req), status);
- kfree_skb(skb);
-
- set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
- if (csk_conn_inline(csk)) {
- struct l2t_entry *e = csk->l2t_entry;
-
- if (e && sk->sk_state != TCP_SYN_RECV) {
- cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
- return;
- }
- }
- cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2078,9 +2058,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
queue = csk->txq_idx;
skb->sk = NULL;
+ chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+ CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(child, lsk);
- send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
- CPL_ABORT_NO_RST, queue);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2110,8 +2090,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
if (!sock_owned_by_user(psk)) {
int queue = csk->txq_idx;
+ chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(sk, psk);
- send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
} else {
skb->sk = sk;
BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2129,9 +2109,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
int queue = csk->txq_idx;
if (is_neg_adv(req->status)) {
- if (sk->sk_state == TCP_SYN_RECV)
- chtls_set_tcb_tflag(sk, 0, 0);
-
kfree_skb(skb);
return;
}
@@ -2158,12 +2135,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
return;
- chtls_release_resources(sk);
- chtls_conn_done(sk);
}
chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
rst_status, queue);
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0981fe9652e5..3d9b0b161e24 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free2;
+ goto free3;
}
ret = ethoc_mdio_probe(netdev);
@@ -1243,6 +1243,7 @@ error2:
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
+free3:
mdiobus_free(priv->mdio);
free2:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index c8e5d889bd81..21de56345503 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
};
module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 8b51ee142fa3..152f4d83765a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
};
module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ba8869c3d891..6d853f018d53 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3889,6 +3889,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
+ dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
@@ -3934,12 +3935,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
- free_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 1a9bdf66a7d8..11d4bf5dc21f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
- u8 res2[0x80 - 0x74];
+ u8 res2[0x78 - 0x74];
+ u64 snums_en;
+ u32 l2l3baseptr; /* top byte consists of a few other bit fields */
+
+ u16 mtu[8];
+ u8 res3[0xa8 - 0x94];
+ u32 wrrtablebase; /* top byte is reserved */
+ u8 res4[0xc0 - 0xac];
} __packed;
/* structure representing Extended Filtering Global Parameters in PRAM */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7165da0ee9aa..a6e3f07caf99 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
/* for mutl buffer*/
new_skb = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
skb = new_skb;
check_ok = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index fb5e8842983c..33defa4c180a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -169,7 +169,7 @@ struct hclgevf_mbx_arq_ring {
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
- (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#define hclge_mbx_head_ptr_move_arq(arq) \
- (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e6f37f91c489..c242883fea5d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
- if (hdev->hw.mac.phydev) {
+ if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
@@ -4537,8 +4538,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -4730,6 +4731,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_tuple_sets.ipv6_udp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGE_RSS_INPUT_TUPLE_SCTP;
vport[i].rss_tuple_sets.ipv6_fragment_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 50a294dfaff5..ca46bc9110d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -107,6 +107,8 @@
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 145757cb70f9..674b3a22e91f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -917,8 +917,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -2502,7 +2502,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 1b183bc35604..f6d817a3edcb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -122,6 +122,8 @@
#define HCLGEVF_D_IP_BIT BIT(2)
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index f302504faa8a..9778c83150f1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -955,6 +955,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_rx_pools(adapter);
release_napi(adapter);
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
}
@@ -2341,8 +2342,7 @@ static void __ibmvnic_reset(struct work_struct *work)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
- } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
- adapter->from_passive_init)) {
+ } else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
@@ -2981,9 +2981,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
int rc;
if (!scrq) {
- netdev_dbg(adapter->netdev,
- "Invalid scrq reset. irq (%d) or msgs (%p).\n",
- scrq->irq, scrq->msgs);
+ netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
return -EINVAL;
}
@@ -3873,7 +3871,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
return -1;
}
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
+
client_data_len = vnic_client_data_len(adapter);
buffer_size =
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba7a0f8f6937..5b2143f4b1f8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
+#define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 03215b0aee4b..06442e6bef73 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -23,6 +23,13 @@ struct e1000_stats {
int stat_offset;
};
+static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
+ "s0ix-enabled",
+};
+
+#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
+
#define E1000_STAT(str, m) { \
.stat_string = str, \
.type = E1000_STATS, \
@@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
return E1000_TEST_LEN;
case ETH_SS_STATS:
return E1000_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return E1000E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, e1000e_priv_flags_strings,
+ E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
+static u32 e1000e_get_priv_flags(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+
+ return priv_flags;
+}
+
+static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags2 = adapter->flags2;
+
+ flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
+ if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac.type < e1000_pch_cnp)
+ return -EINVAL;
+ flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+ }
+
+ if (flags2 != adapter->flags2)
+ adapter->flags2 = flags2;
+
+ return 0;
+}
+
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
@@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_eee = e1000e_set_eee,
.get_link_ksettings = e1000_get_link_ksettings,
.set_link_ksettings = e1000_set_link_ksettings,
+ .get_priv_flags = e1000e_get_priv_flags,
+ .set_priv_flags = e1000e_set_priv_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9aa6fad8ed47..6fb46682b058 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
return 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ struct e1000_adapter *adapter = hw->adapter;
+ bool firmware_bug = false;
+
if (force) {
/* Request ME un-configure ULP mode in the PHY */
mac_reg = er32(H2ME);
@@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
ew32(H2ME, mac_reg);
}
- /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+ /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
+ * If this takes more than 1 second, show a warning indicating a
+ * firmware bug
+ */
while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 30) {
+ if (i++ == 250) {
ret_val = -E1000_ERR_PHY;
goto out;
}
+ if (i > 100 && !firmware_bug)
+ firmware_bug = true;
usleep_range(10000, 11000);
}
- e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+ if (firmware_bug)
+ e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
+ else
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (force) {
mac_reg = er32(H2ME);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 128ab6898070..e9b82c209c2d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
-struct e1000e_me_supported {
- u16 device_id; /* supported device ID */
-};
-
-static const struct e1000e_me_supported me_supported[] = {
- {E1000_DEV_ID_PCH_LPT_I217_LM},
- {E1000_DEV_ID_PCH_LPTLP_I218_LM},
- {E1000_DEV_ID_PCH_I218_LM2},
- {E1000_DEV_ID_PCH_I218_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM},
- {E1000_DEV_ID_PCH_SPT_I219_LM2},
- {E1000_DEV_ID_PCH_LBG_I219_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM4},
- {E1000_DEV_ID_PCH_SPT_I219_LM5},
- {E1000_DEV_ID_PCH_CNP_I219_LM6},
- {E1000_DEV_ID_PCH_CNP_I219_LM7},
- {E1000_DEV_ID_PCH_ICP_I219_LM8},
- {E1000_DEV_ID_PCH_ICP_I219_LM9},
- {E1000_DEV_ID_PCH_CMP_I219_LM10},
- {E1000_DEV_ID_PCH_CMP_I219_LM11},
- {E1000_DEV_ID_PCH_CMP_I219_LM12},
- {E1000_DEV_ID_PCH_TGP_I219_LM13},
- {E1000_DEV_ID_PCH_TGP_I219_LM14},
- {E1000_DEV_ID_PCH_TGP_I219_LM15},
- {0}
-};
-
-static bool e1000e_check_me(u16 device_id)
-{
- struct e1000e_me_supported *id;
-
- for (id = (struct e1000e_me_supported *)me_supported;
- id->device_id; id++)
- if (device_id == id->device_id)
- return true;
-
- return false;
-}
-
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc)
+ if (rc) {
e1000e_pm_thaw(dev);
-
- /* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
- e1000e_s0ix_entry_flow(adapter);
+ } else {
+ /* Introduce S0ix implementation */
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ e1000e_s0ix_entry_flow(adapter);
+ }
return rc;
}
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
+ if (hw->mac.type >= e1000_pch_cnp)
+ adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+
strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d231a2cdd98f..118473dfdcbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -120,6 +120,7 @@ enum i40e_state_t {
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1337686bd099..1db482d310c2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+ * Resets PF and reinitializes PFs VSI.
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 729c4f0d5ac5..21ee56420c3a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1772,7 +1772,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
goto sriov_configure_out;
@@ -1781,7 +1781,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
ret = -EINVAL;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 95543dfd4fe7..0a867d64d467 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_add_device(adapter);
- if (err) {
- rtnl_unlock();
+ if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
- }
}
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 563ceac3060f..bc4d8d144401 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4432,7 +4432,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct bpf_prog *old_prog;
if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}
@@ -5255,7 +5255,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- return err;
+ goto err_netdev;
}
/* Armada3700 network controller does not support per-cpu
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index afdd22827223..4b1808acef58 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+ val |= GENCONF_CTRL0_PORT0_RGMII;
else if (port->gop_id == 3)
val |= GENCONF_CTRL0_PORT1_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -2370,17 +2370,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned int thread;
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
-
- put_cpu();
+ /* PKT-coalescing registers are per-queue + per-thread */
+ for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ }
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -5487,7 +5488,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int thread;
- int queue, err;
+ int queue, err, val;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -5501,6 +5502,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
+ if (mvpp2_is_xlg(port->phy_interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 5692c6087bbb..a30eb90ba3d2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
return -EINVAL;
}
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
+ unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+ struct mvpp2_prs_entry pe;
+ unsigned int len;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* For all ports - drop flow control frames */
+ pe.index = MVPP2_PE_FC_DROP;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* Jump to DIP of IPV6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index e22f6c85d380..4b68dd374733 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -129,7 +129,7 @@
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 7d0f96290943..1a8f5a039d50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -871,8 +871,10 @@ static int cgx_lmac_init(struct cgx *cgx)
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
- if (!lmac->name)
- return -ENOMEM;
+ if (!lmac->name) {
+ err = -ENOMEM;
+ goto err_lmac_free;
+ }
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
lmac->lmac_id = i;
lmac->cgx = cgx;
@@ -883,7 +885,7 @@ static int cgx_lmac_init(struct cgx *cgx)
CGX_LMAC_FWI + i * 9),
cgx_fwi_event_handler, 0, lmac->name, lmac);
if (err)
- return err;
+ goto err_irq;
/* Enable interrupt */
cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
@@ -895,6 +897,12 @@ static int cgx_lmac_init(struct cgx *cgx)
}
return cgx_lmac_verify_fwi_version(cgx);
+
+err_irq:
+ kfree(lmac->name);
+err_lmac_free:
+ kfree(lmac);
+ return err;
}
static int cgx_lmac_exit(struct cgx *cgx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index d29af7b9c695..76177f7c5ec2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
if (!reg_c0)
return true;
+ /* If reg_c0 is not equal to the default flow tag then skb->mark
+ * is not supported and must be reset back to 0.
+ */
+ skb->mark = 0;
+
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index e521254d886e..072363e73f1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
u16 zone;
};
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
struct mlx5_fc *counter;
refcount_t refcount;
+ bool is_shared;
};
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
- struct mlx5_ct_shared_counter *shared_counter;
+ struct mlx5_ct_counter *counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
- if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+ if (entry->counter->is_shared &&
+ !refcount_dec_and_test(&entry->counter->refcount))
return;
- mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
- kfree(entry->shared_counter);
+ mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+ kfree(entry->counter);
}
static void
@@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_ft = ct_priv->post_ct;
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
- attr->counter = entry->shared_counter->counter;
+ attr->counter = entry->counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -732,13 +734,34 @@ err_attr:
return err;
}
-static struct mlx5_ct_shared_counter *
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+ struct mlx5_ct_counter *counter;
+ int ret;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ counter->is_shared = false;
+ counter->counter = mlx5_fc_create(ct_priv->dev, true);
+ if (IS_ERR(counter->counter)) {
+ ct_dbg("Failed to create counter for ct entry");
+ ret = PTR_ERR(counter->counter);
+ kfree(counter);
+ return ERR_PTR(ret);
+ }
+
+ return counter;
+}
+
+static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
struct mlx5_ct_tuple rev_tuple = entry->tuple;
- struct mlx5_ct_shared_counter *shared_counter;
- struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
int ret;
@@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
tuples_ht_params);
if (rev_entry) {
- if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+ if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
mutex_unlock(&ct_priv->shared_counter_lock);
- return rev_entry->shared_counter;
+ return rev_entry->counter;
}
}
mutex_unlock(&ct_priv->shared_counter_lock);
- shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
- if (!shared_counter)
- return ERR_PTR(-ENOMEM);
-
- shared_counter->counter = mlx5_fc_create(dev, true);
- if (IS_ERR(shared_counter->counter)) {
- ct_dbg("Failed to create counter for ct entry");
- ret = PTR_ERR(shared_counter->counter);
- kfree(shared_counter);
+ shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+ if (IS_ERR(shared_counter)) {
+ ret = PTR_ERR(shared_counter);
return ERR_PTR(ret);
}
+ shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
return shared_counter;
}
@@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
{
int err;
- entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
- if (IS_ERR(entry->shared_counter)) {
- err = PTR_ERR(entry->shared_counter);
- ct_dbg("Failed to create counter for ct entry");
+ if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+ entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+ else
+ entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+ if (IS_ERR(entry->counter)) {
+ err = PTR_ERR(entry->counter);
return err;
}
@@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
}
@@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
mutex_unlock(&ct_priv->shared_counter_lock);
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
}
@@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
- mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 7943eb30b837..4880f2179273 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
u8 tun_l4_proto;
};
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+ /* SWP offsets are in 2-bytes words */
+ eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 899b98aca0d3..1fae7fab8297 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
}
static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
struct mlx5e_swp_spec swp_spec = {};
unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
}
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+ if (skb_vlan_tag_present(skb) && ihs)
+ mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
#else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, eseg);
+ mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d9076d543104..2d37742a888c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1010,6 +1010,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+ const unsigned long link_modes, u8 autoneg)
+{
+ /* Extended link-mode has no speed limitations. */
+ if (ext)
+ return 0;
+
+ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+ autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
@@ -1103,13 +1119,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
mlx5e_port_speed2linkmodes(mdev, speed, !ext);
- if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
- autoneg != AUTONEG_ENABLE) {
- netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
- __func__);
- err = -EINVAL;
+ err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+ if (err)
goto out;
- }
link_modes = link_modes & eproto.cap;
if (!link_modes) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index fa8149f6eb08..e02e5895703d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -942,6 +942,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1087,6 +1088,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1390,6 +1392,7 @@ err_destroy_groups:
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
kvfree(in);
+ kfree(ft->g);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7a79d330c075..6a852b4901aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3161,7 +3161,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
mlx5_set_port_admin_status(mdev, state);
- if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+ if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+ !MLX5_CAP_GEN(mdev, uplink_follow))
return;
if (state == MLX5_PORT_UP)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e47e2a0059d0..61ed671fe741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -682,9 +682,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+ if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
return false;
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -714,7 +714,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+ attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +732,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 2b85d4777303..3e19b1721303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
return 0;
}
- if (!IS_ERR_OR_NULL(vport->egress.acl))
- return 0;
-
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- table_size);
- if (IS_ERR(vport->egress.acl)) {
- err = PTR_ERR(vport->egress.acl);
- vport->egress.acl = NULL;
- goto out;
+ if (!vport->egress.acl) {
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ table_size);
+ if (IS_ERR(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ goto out;
+ }
+
+ err = esw_acl_egress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
}
- err = esw_acl_egress_lgcy_groups_create(esw, vport);
- if (err)
- goto out;
-
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index f3d45ef082cd..83a05371e2aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
struct mlx5_core_dev *tmp_dev;
int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
return;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
- tmp_dev = ldev->pf[i].dev;
- if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
- MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (!ldev->pf[i].dev)
break;
- }
if (i >= MLX5_MAX_PORTS)
ldev->flags |= MLX5_LAG_FLAG_READY;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c08315b51fd3..ca6f2fc39ea0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1368,8 +1368,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
MLX5_COREDEV_VF : MLX5_COREDEV_PF;
dev->priv.adev_idx = mlx5_adev_idx_alloc();
- if (dev->priv.adev_idx < 0)
- return dev->priv.adev_idx;
+ if (dev->priv.adev_idx < 0) {
+ err = dev->priv.adev_idx;
+ goto adev_init_err;
+ }
err = mlx5_mdev_init(dev, prof_sel);
if (err)
@@ -1403,6 +1405,7 @@ pci_init_err:
mlx5_mdev_uninit(dev);
mdev_init_err:
mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
mlx5_devlink_free(devlink);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 0fc7de4aa572..8e0dddc6383f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -116,7 +116,7 @@ free:
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
mlx5_core_roce_gid_set(dev, 0, 0, 0,
- NULL, NULL, false, 0, 0);
+ NULL, NULL, false, 0, 1);
}
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 776b7d264dc3..2289e1fe3741 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out;
+ goto undo_probe;
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(dev);
@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
err = register_netdev(ndev);
if (err)
- goto out;
+ goto undo_probe;
nubus_set_drvdata(board, ndev);
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(ndev);
return err;
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index afa166ff7aef..28d9e98db81a 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
sonic_msg_init(dev);
if ((err = register_netdev(dev)))
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 9156c9825a16..ac4cd5d82e69 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int i, j;
unsigned int len;
- len = netdev->mtu + ETH_HLEN;
+ len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 4366c7a8de95..6b5ddb07ee83 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -78,6 +78,7 @@ config QED
depends on PCI
select ZLIB_INFLATE
select CRC8
+ select CRC32
select NET_DEVLINK
help
This enables the support for Marvell FastLinQ adapters family.
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index a2494bf85007..ca0ee29a57b5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1799,6 +1799,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
ntohs(udp_hdr(skb)->dest) != gnv_port))
return features & ~(NETIF_F_CSUM_MASK |
NETIF_F_GSO_MASK);
+ } else if (l4_proto == IPPROTO_IPIP) {
+ /* IPIP tunnels are unknown to the device or at least unsupported natively,
+ * offloads for them can't be done trivially, so disable them for such skb.
+ */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46d8510b2fe2..a569abe7f5ef 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2207,7 +2207,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
@@ -2233,7 +2234,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index a2e80c89de2d..9a6a519426a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -721,6 +721,8 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
@@ -735,6 +737,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 459ae715b33d..f184b00f5116 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -135,7 +135,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
static const struct clk_parent_data mux_parents[] = {
{ .fw_name = "clkin0", },
- { .fw_name = "clkin1", },
+ { .index = -1, },
};
static const struct clk_div_table div_table[] = {
{ .div = 2, .val = 2, },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 58e0511badba..a5e0eff4a387 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -64,6 +64,7 @@ struct emac_variant {
* @variant: reference to the current board variant
* @regmap: regmap for using the syscon
* @internal_phy_powered: Does the internal PHY is enabled
+ * @use_internal_phy: Is the internal PHY selected for use
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
@@ -74,6 +75,7 @@ struct sunxi_priv_data {
const struct emac_variant *variant;
struct regmap_field *regmap_field;
bool internal_phy_powered;
+ bool use_internal_phy;
void *mux_handle;
};
@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.dma_interrupt = sun8i_dwmac_dma_interrupt,
};
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct sunxi_priv_data *gmac = priv;
int ret;
@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
- if (gmac->regulator)
- regulator_disable(gmac->regulator);
dev_err(&pdev->dev, "Could not enable AHB clock\n");
- return ret;
+ goto err_disable_regulator;
+ }
+
+ if (gmac->use_internal_phy) {
+ ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+ if (ret)
+ goto err_disable_clk;
}
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+
+ return ret;
}
static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
u32 reg, val;
int ret = 0;
- bool need_power_ephy = false;
if (current_child ^ desired_child) {
regmap_field_read(gmac->regmap_field, &reg);
@@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-
- need_power_ephy = true;
+ gmac->use_internal_phy = true;
break;
case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
dev_info(priv->device, "Switch mux to external PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
- need_power_ephy = false;
+ gmac->use_internal_phy = false;
break;
default:
dev_err(priv->device, "Invalid child ID %x\n",
@@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
return -EINVAL;
}
regmap_field_write(gmac->regmap_field, val);
- if (need_power_ephy) {
+ if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
return ret;
@@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
return ret;
}
-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+static int sun8i_dwmac_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
{
- struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- struct device_node *node = priv->device->of_node;
+ struct sunxi_priv_data *gmac = plat->bsp_priv;
+ struct device_node *node = dev->of_node;
int ret;
u32 reg, val;
ret = regmap_field_read(gmac->regmap_field, &val);
if (ret) {
- dev_err(priv->device, "Fail to read from regmap field.\n");
+ dev_err(dev, "Fail to read from regmap field.\n");
return ret;
}
reg = gmac->variant->default_syscon_value;
if (reg != val)
- dev_warn(priv->device,
+ dev_warn(dev,
"Current syscon value is not the default %x (expect %x)\n",
val, reg);
@@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
- ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+ ret = of_mdio_parse_addr(dev, plat->phy_node);
if (ret < 0) {
- dev_err(priv->device, "Could not parse MDIO addr\n");
+ dev_err(dev, "Could not parse MDIO addr\n");
return ret;
}
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+ dev_err(dev, "tx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set tx-delay to %x\n", val);
+ dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
reg &= ~(gmac->variant->tx_delay_max <<
SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid TX clock delay: %d\n",
+ dev_err(dev, "Invalid TX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+ dev_err(dev, "rx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set rx-delay to %x\n", val);
+ dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
reg &= ~(gmac->variant->rx_delay_max <<
SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid RX clock delay: %d\n",
+ dev_err(dev, "Invalid RX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (priv->plat->interface) {
+ switch (plat->interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
break;
default:
- dev_err(priv->device, "Unsupported interface mode: %s",
- phy_modes(priv->plat->interface));
+ dev_err(dev, "Unsupported interface mode: %s",
+ phy_modes(plat->interface));
return -EINVAL;
}
@@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
struct sunxi_priv_data *gmac = priv;
if (gmac->variant->soc_has_internal_phy) {
- /* sun8i_dwmac_exit could be called with mdiomux uninit */
- if (gmac->mux_handle)
- mdio_mux_uninit(gmac->mux_handle);
if (gmac->internal_phy_powered)
sun8i_dwmac_unpower_internal_phy(gmac);
}
- sun8i_dwmac_unset_syscon(gmac);
-
- reset_control_put(gmac->rst_ephy);
-
clk_disable_unprepare(gmac->tx_clk);
if (gmac->regulator)
@@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
{
struct mac_device_info *mac;
struct stmmac_priv *priv = ppriv;
- int ret;
mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
if (!mac)
return NULL;
- ret = sun8i_dwmac_set_syscon(priv);
- if (ret)
- return NULL;
-
mac->pcsr = priv->ioaddr;
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
@@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
- if (IS_ERR(plat_dat))
- return PTR_ERR(plat_dat);
-
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
return -ENOMEM;
@@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = of_get_phy_mode(dev->of_node, &interface);
if (ret)
return -EINVAL;
- plat_dat->interface = interface;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
+ plat_dat->interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->has_sun8i = true;
@@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
plat_dat->exit = sun8i_dwmac_exit;
plat_dat->setup = sun8i_dwmac_setup;
+ ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+ if (ret)
+ goto dwmac_deconfig;
+
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto dwmac_syscon;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (gmac->variant->soc_has_internal_phy) {
ret = get_ephy_nodes(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
ret = sun8i_dwmac_register_mdio_mux(priv);
if (ret) {
dev_err(&pdev->dev, "Failed to register mux\n");
@@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
} else {
ret = sun8i_dwmac_reset(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
}
return ret;
dwmac_mux:
- sun8i_dwmac_unset_syscon(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+dwmac_remove:
+ stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
+ sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+ sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ if (gmac->variant->soc_has_internal_phy) {
+ mdio_mux_uninit(gmac->mux_handle);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+ }
+
stmmac_pltfr_remove(pdev);
-return ret;
+ sun8i_dwmac_unset_syscon(gmac);
+
+ return 0;
}
static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = sun8i_dwmac_remove,
.driver = {
.name = "dwmac-sun8i",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index d1fc7955d422..43222a34cba0 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -599,6 +599,7 @@ void cpts_unregister(struct cpts *cpts)
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
+ cpts->phc_index = -1;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
@@ -784,6 +785,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.read = cpts_systim_read;
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ cpts->phc_index = -1;
if (n_ext_ts)
cpts->info.n_ext_ts = n_ext_ts;
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index c4795249719d..14d9a791924b 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -326,8 +326,8 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
}
/* Issue an event ring command and wait for it to complete */
-static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
- enum gsi_evt_cmd_opcode opcode)
+static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
@@ -340,7 +340,13 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
* is issued here. Only permit *this* event ring to trigger
* an interrupt, and only enable the event control IRQ type
* when we expect it to occur.
+ *
+ * There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
*/
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
val = BIT(evt_ring_id);
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_EV_CTRL);
@@ -355,19 +361,16 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
if (success)
- return 0;
+ return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
opcode, evt_ring_id, evt_ring->state);
-
- return -ETIMEDOUT;
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
/* Get initial event ring state */
evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
@@ -377,14 +380,16 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return -EINVAL;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
- evt_ring_id, evt_ring->state);
- ret = -EIO;
- }
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- return ret;
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return 0;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+ evt_ring_id, evt_ring->state);
+
+ return -EIO;
}
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
@@ -392,7 +397,6 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
enum gsi_evt_ring_state state = evt_ring->state;
- int ret;
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
@@ -401,17 +405,20 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+ evt_ring_id, evt_ring->state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
@@ -419,10 +426,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+ evt_ring_id, evt_ring->state);
}
/* Fetch the current state of a channel from hardware */
@@ -438,7 +449,7 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
}
/* Issue a channel command and wait for it to complete */
-static int
+static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
struct completion *completion = &channel->completion;
@@ -453,7 +464,13 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
* issued here. So we only permit *this* channel to trigger
* an interrupt and only enable the channel control IRQ type
* when we expect it to occur.
+ *
+ * There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
*/
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
val = BIT(channel_id);
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_CH_CTRL);
@@ -467,12 +484,10 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
if (success)
- return 0;
+ return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
-
- return -ETIMEDOUT;
}
/* Allocate GSI channel in NOT_ALLOCATED state */
@@ -481,7 +496,6 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
/* Get initial channel state */
state = gsi_channel_state(channel);
@@ -491,17 +505,17 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+ gsi_channel_command(channel, GSI_CH_ALLOCATE);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "channel %u bad state %u after alloc\n",
- channel_id, state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_ALLOCATED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after alloc\n",
+ channel_id, state);
+
+ return -EIO;
}
/* Start an ALLOCATED channel */
@@ -509,7 +523,6 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
@@ -519,17 +532,17 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_START);
+ gsi_channel_command(channel, GSI_CH_START);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(dev, "channel %u bad state %u after start\n",
- gsi_channel_id(channel), state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_STARTED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after start\n",
+ gsi_channel_id(channel), state);
+
+ return -EIO;
}
/* Stop a GSI channel in STARTED state */
@@ -537,7 +550,6 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
@@ -554,12 +566,12 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_STOP);
+ gsi_channel_command(channel, GSI_CH_STOP);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (ret || state == GSI_CHANNEL_STATE_STOPPED)
- return ret;
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
/* We may have to try again if stop is in progress */
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
@@ -576,7 +588,6 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
msleep(1); /* A short delay is required before a RESET command */
@@ -590,11 +601,11 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_RESET);
+ gsi_channel_command(channel, GSI_CH_RESET);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(dev, "channel %u bad state %u after reset\n",
gsi_channel_id(channel), state);
}
@@ -605,7 +616,6 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
@@ -614,11 +624,12 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+ gsi_channel_command(channel, GSI_CH_DE_ALLOC);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
dev_err(dev, "channel %u bad state %u after dealloc\n",
channel_id, state);
}
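The gsi.c hunks above make evt_ring_command() and gsi_channel_command() clear any stale pending bits (writing ~0 to the *_IRQ_CLR register) before unmasking only the ring or channel being commanded, so a completion that landed while interrupts were masked cannot fire spuriously. Here is a small sketch of that clear-before-unmask sequence against a toy register file; the fake_irq_regs layout and names are illustrative, not the real GSI registers:

#include <stdint.h>
#include <stdio.h>

/* Toy model of the IRQ registers touched in evt_ring_command(): a
 * latched source register plus a mask register. Real hardware is
 * accessed with iowrite32(); plain stores are used here. */
struct fake_irq_regs {
	uint32_t src;	/* latched pending bits */
	uint32_t clr;	/* write-1-to-clear mirror of src */
	uint32_t msk;	/* 1 = interrupt enabled for that bit */
};

static void irq_clear_all(struct fake_irq_regs *r)
{
	r->clr = ~0u;		/* drop any stale completion... */
	r->src &= ~r->clr;	/* ...so it cannot fire once unmasked */
}

static void irq_enable_only(struct fake_irq_regs *r, unsigned int id)
{
	r->msk = 1u << id;	/* permit just this ring/channel */
}

int main(void)
{
	struct fake_irq_regs regs = { .src = 1u << 3 /* stale bit */ };

	irq_clear_all(&regs);	/* mirrors the new *_IRQ_CLR write */
	irq_enable_only(&regs, 5);

	printf("pending&enabled = 0x%x\n", (unsigned)(regs.src & regs.msk)); /* 0x0 */
	return 0;
}
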
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 9dcf16f399b7..135c393437f1 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -115,13 +115,13 @@ static int ipa_interconnect_enable(struct ipa *ipa)
return ret;
data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
+ ret = icc_set_bw(clock->imem_path, data->average_rate,
data->peak_rate);
if (ret)
goto err_memory_path_disable;
data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
+ ret = icc_set_bw(clock->config_path, data->average_rate,
data->peak_rate);
if (ret)
goto err_imem_path_disable;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fbed05ae7b0f..978ac0981d16 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1365,7 +1365,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int i;
if (it->nr_segs > MAX_SKB_FRAGS + 1)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EMSGSIZE);
local_bh_disable();
skb = napi_get_frags(&tfile->napi);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 2bac57d5e8d5..5a78848db93f 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ delayed_ndp_size = ctx->max_ndp_size +
+ max_t(u32,
+ ctx->tx_ndp_modulus,
+ ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
- skb_put_zero(skb_out, padding_count);
+ if (!WARN_ON(padding_count > ctx->tx_curr_size))
+ skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
@@ -1863,9 +1867,6 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- netif_info(dev, link, dev->net,
- "network connection: %sconnected\n",
- !!event->wValue ? "" : "dis");
usbnet_link_change(dev, !!event->wValue, 0);
break;
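The cdc_ncm hunk above sizes the reserved room for a delayed NDP as max_ndp_size plus the worst-case padding either alignment rule can demand, max(tx_ndp_modulus, tx_modulus + tx_remainder) - 1, rather than merely aligning max_ndp_size. A tiny arithmetic sketch of that bound; the example values in main() are made up:

#include <stdint.h>
#include <stdio.h>

/* Worst-case room needed for an NDP that is appended later: the NDP
 * itself plus the largest padding either alignment rule can require. */
static uint32_t delayed_ndp_room(uint32_t max_ndp_size,
				 uint32_t tx_ndp_modulus,
				 uint32_t tx_modulus,
				 uint32_t tx_remainder)
{
	uint32_t pad = tx_ndp_modulus > tx_modulus + tx_remainder ?
		       tx_ndp_modulus : tx_modulus + tx_remainder;

	return max_ndp_size + pad - 1;
}

int main(void)
{
	/* Example values only; real ones come from the NCM parameters. */
	printf("%u\n", (unsigned)delayed_ndp_room(32, 4, 512, 0)); /* 32 + 512 - 1 */
	return 0;
}
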
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d166c321ee9b..af19513a9f75 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1013,6 +1013,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4c41df624dbb..508408fbe78f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2093,14 +2093,16 @@ static int virtnet_set_channels(struct net_device *dev,
get_online_cpus();
err = _virtnet_set_queues(vi, queue_pairs);
- if (!err) {
- netif_set_real_num_tx_queues(dev, queue_pairs);
- netif_set_real_num_rx_queues(dev, queue_pairs);
-
- virtnet_set_affinity(vi);
+ if (err) {
+ put_online_cpus();
+ goto err;
}
+ virtnet_set_affinity(vi);
put_online_cpus();
+ netif_set_real_num_tx_queues(dev, queue_pairs);
+ netif_set_real_num_rx_queues(dev, queue_pairs);
+ err:
return err;
}
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4029fde71a9e..83c9481995dd 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -282,6 +282,7 @@ config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+ select BITREVERSE
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 64f855651336..261b53fc8e04 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&ppp->lock, flags);
+ /* mod_timer could be called after we entered this function but
+ * before we got the lock.
+ */
+ if (timer_pending(&proto->timer)) {
+ spin_unlock_irqrestore(&ppp->lock, flags);
+ return;
+ }
switch (proto->state) {
case STOPPING:
case REQ_SENT:
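The ppp_timer() change above returns early when timer_pending() is true after taking ppp->lock, because a mod_timer() that raced in between expiry and lock acquisition has already scheduled a fresh run. Below is a simplified single-threaded sketch of that "was I re-armed?" check, using a plain flag in place of timer_pending() and stubbed lock calls:

#include <stdio.h>

/* Minimal stand-ins: 'pending' plays the role of timer_pending() and
 * the lock calls are stubs, since this only models the control flow. */
struct toy_proto {
	int pending;	/* set again by a racing mod_timer() */
	int state;
};

static void lock(void)   { }
static void unlock(void) { }

static void timer_handler(struct toy_proto *p)
{
	lock();
	/* A re-arm that slipped in before we got the lock means a fresh
	 * expiry is already scheduled; let that run instead of us. */
	if (p->pending) {
		unlock();
		return;
	}
	p->state++;	/* normal expiry work */
	unlock();
}

int main(void)
{
	struct toy_proto p = { .pending = 1, .state = 0 };

	timer_handler(&p);
	printf("state = %d\n", p.state);	/* still 0: handler bailed */
	return 0;
}
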
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index b97c38b9a270..350b7913622c 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -185,7 +185,7 @@ int ath11k_core_suspend(struct ath11k_base *ab)
ath11k_hif_ce_irq_disable(ab);
ret = ath11k_hif_suspend(ab);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 205c0f1a40e9..920e5026a635 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2294,6 +2294,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
{
u8 channel_num;
u32 center_freq;
+ struct ieee80211_channel *channel;
rx_status->freq = 0;
rx_status->rate_idx = 0;
@@ -2314,9 +2315,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
- rx_status->band = ar->rx_channel->band;
- channel_num =
- ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
spin_unlock_bh(&ar->data_lock);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(struct hal_rx_desc));
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 5c175e3e09b2..c1608f64ea95 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3021,6 +3021,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
ret = ath11k_start_vdev_delay(ar->hw, vif);
if (ret) {
@@ -5284,7 +5285,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
@@ -5295,7 +5297,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay) {
+ if (ab->hw_params.vdev_start_delay &&
+ (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
param.vdev_id = arvif->vdev_id;
param.peer_type = WMI_PEER_TYPE_DEFAULT;
param.peer_addr = ar->mac_addr;
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 857647aa57c8..20b415cd96c4 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -274,7 +274,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
return ret;
}
@@ -283,7 +283,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
return ret;
}
@@ -292,7 +292,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
return ret;
}
@@ -301,7 +301,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
return ret;
}
@@ -886,6 +886,32 @@ static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
pci_disable_device(pci_dev);
}
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl);
+}
+
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -895,6 +921,11 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath11k_pci_sw_reset(ab_pci->ab, true);
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath11k_pci_aspm_disable(ab_pci);
+
ret = ath11k_mhi_start(ab_pci);
if (ret) {
ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -908,6 +939,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ /* restore aspm in case firmware bootup fails */
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_force_wake(ab_pci->ab);
ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
@@ -965,6 +999,8 @@ static int ath11k_pci_start(struct ath11k_base *ab)
set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
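ath11k_pci_aspm_disable() above saves PCI_EXP_LNKCTL, clears the two ASPM control bits for the duration of firmware download, and sets a flag so ath11k_pci_aspm_restore() can write the original value back exactly once. A standalone sketch of just the save/clear/restore bit handling on a 16-bit link-control value; the LNKCTL_ASPMC define and the plain need_restore flag are simplifications of the kernel's PCI_EXP_LNKCTL_ASPMC and test_and_clear_bit():

#include <stdint.h>
#include <stdio.h>

#define LNKCTL_ASPMC	0x0003	/* ASPM control field: L0s | L1 */

struct aspm_state {
	uint16_t saved_lnkctl;
	int need_restore;
};

static uint16_t aspm_disable(struct aspm_state *s, uint16_t lnkctl)
{
	s->saved_lnkctl = lnkctl;		/* remember original setting */
	s->need_restore = 1;
	return lnkctl & (uint16_t)~LNKCTL_ASPMC; /* value to write back */
}

static int aspm_restore(const struct aspm_state *s, uint16_t *lnkctl)
{
	if (!s->need_restore)
		return 0;
	*lnkctl = s->saved_lnkctl;		/* put L0s/L1 back as found */
	return 1;
}

int main(void)
{
	struct aspm_state st = { 0 };
	uint16_t lnkctl = 0x0043;		/* L0s|L1 enabled + other bits */

	lnkctl = aspm_disable(&st, lnkctl);
	printf("during fw load: 0x%04x\n", (unsigned)lnkctl);	/* 0x0040 */
	aspm_restore(&st, &lnkctl);
	printf("after start:    0x%04x\n", (unsigned)lnkctl);	/* 0x0043 */
	return 0;
}
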
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 0432a702416b..fe44d0dfce19 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -63,6 +63,7 @@ struct ath11k_msi_config {
enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
+ ATH11K_PCI_ASPM_RESTORE,
};
struct ath11k_pci {
@@ -80,6 +81,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
+ u16 link_ctl;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 1866d82678fa..b69e7ebfa930 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -76,6 +76,23 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
return NULL;
}
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return peer;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return NULL;
+}
+
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
struct ath11k_peer *peer;
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index bba2e00b6944..8553ed061aea 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -43,5 +43,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param);
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index f0b5c50974f3..0db623ff4bb9 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1660,6 +1660,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0, i;
+ bool delayed;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@@ -1672,11 +1673,13 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
* failure to FW and FW will then request multiple blocks of small

* chunk size memory.
*/
- if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+ if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+ delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
memset(req, 0, sizeof(*req));
} else {
+ delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
for (i = 0; i < req->mem_seg_len ; i++) {
@@ -1708,6 +1711,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* the error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
@@ -1742,6 +1751,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
int i;
struct target_mem_chunk *chunk;
+ ab->qmi.target_mem_delayed = false;
+
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
chunk->vaddr = dma_alloc_coherent(ab->dev,
@@ -1749,6 +1760,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
&chunk->paddr,
GFP_KERNEL);
if (!chunk->vaddr) {
+ if (ab->qmi.mem_seg_count <= 2) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
chunk->size,
chunk->type);
@@ -2517,7 +2537,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
ret);
return;
}
- } else if (msg->mem_seg_len > 2) {
+ } else {
ret = ath11k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 92925c9eac67..7bad374cc23a 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -125,6 +125,7 @@ struct ath11k_qmi {
struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
u32 mem_seg_count;
u32 target_mem_mode;
+ bool target_mem_delayed;
u8 cal_done;
struct target_info target;
struct m3_mem_region m3_mem;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index da4b546b62cb..73869d445c5b 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -3460,6 +3460,9 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 6a95b199bf62..f074e9c31aa2 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -2,6 +2,7 @@
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
+ select CRC32
depends on CFG80211
depends on PCI
default n
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index ed4635bd151a..102a8f14c22d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -40,9 +40,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
.types = BIT(NL80211_IFTYPE_ADHOC)
}, {
.max = 16,
- .types = BIT(NL80211_IFTYPE_AP) |
+ .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT)
+ | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
}, {
.max = MT7915_MAX_INTERFACES,
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 62b5b912818f..0b6facb17ff7 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -157,10 +157,14 @@ static void mt76s_net_worker(struct mt76_worker *w)
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_queue_entry entry;
int nframes = 0;
+ bool mcu;
+ if (!q)
+ return 0;
+
+ mcu = q == dev->q_mcu[MT_MCUQ_WM];
while (q->queued > 0) {
if (!q->entry[q->tail].done)
break;
@@ -177,21 +181,12 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
nframes++;
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
- if (mcu)
- goto out;
-
- mt76_txq_schedule(&dev->phy, q->qid);
+ if (!mcu)
+ mt76_txq_schedule(&dev->phy, q->qid);
- if (wake)
- ieee80211_wake_queue(dev->hw, q->qid);
-out:
return nframes;
}
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index dc850109de22..b95d093728b9 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -811,11 +811,12 @@ static void mt76u_status_worker(struct mt76_worker *w)
struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
struct mt76_queue_entry entry;
struct mt76_queue *q;
- bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->phy.q_tx[i];
+ if (!q)
+ continue;
while (q->queued > 0) {
if (!q->entry[q->tail].done)
@@ -827,10 +828,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
mt76_queue_tx_complete(dev, q, &entry);
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
@@ -839,8 +836,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->wq, &dev->usb.stat_work);
- if (wake)
- ieee80211_wake_queue(dev->hw, i);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index a7259dbc953d..965bd9589045 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
}
pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
- return;
+ goto exit;
}
found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
release_firmware(firmware);
- return;
+ goto exit;
}
if (!is_wow) {
memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ found_alt:
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
release_firmware(firmware);
+
+exit:
+ complete(&rtlpriv->firmware_loading_complete);
}
void rtl_fw_cb(const struct firmware *firmware, void *context)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ce1b61519441..f320273fc672 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
- nvme_trace_bio_complete(req, status);
+ nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
nvme_init_request(req, cmd);
return req;
}
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 38373a0e86ef..5f36cfa8136c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
+ struct work_struct ioerr_work;
struct delayed_work connect_work;
struct kref ref;
@@ -1889,6 +1890,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
}
static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -2046,7 +2056,7 @@ done:
check_error:
if (terminate_assoc)
- nvme_fc_error_recovery(ctrl, "transport detected io error");
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7e49f61f81df..88a6b97247f5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b4385cb0ff60..50d9a20568a2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -967,6 +967,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
/*
@@ -975,17 +976,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
}
- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+ req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
if (unlikely(!req)) {
dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
- cqe->command_id, le16_to_cpu(cqe->sq_id));
+ command_id, le16_to_cpu(cqe->sq_id));
return;
}
@@ -3196,7 +3197,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
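The nvme_handle_cqe() hunk above latches cqe->command_id into a local via READ_ONCE() so the AEN check, the tag lookup, and the warning all see one consistent value even though the CQE sits in device-writable memory. A minimal sketch of that read-once-then-reuse pattern, with a volatile load standing in for the kernel's READ_ONCE() and a toy CQE type:

#include <stdint.h>
#include <stdio.h>

/* Toy completion entry that the device may rewrite at any time. */
struct toy_cqe {
	uint16_t command_id;
	uint16_t status;
};

/* Stand-in for READ_ONCE(): a single volatile load, never re-read. */
#define TOY_READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static void handle_cqe(struct toy_cqe *cqe)
{
	uint16_t command_id = TOY_READ_ONCE(cqe->command_id);

	/* Every later use goes through the snapshot, so a concurrent
	 * device write cannot make the checks disagree with the lookup. */
	if (command_id == 0xffff) {
		printf("async event, id %#x\n", (unsigned)command_id);
		return;
	}
	printf("completing request with tag %#x\n", (unsigned)command_id);
}

int main(void)
{
	struct toy_cqe cqe = { .command_id = 0x12, .status = 0 };

	handle_cqe(&cqe);
	return 0;
}
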
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1ba659927442..979ee31b8dd1 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
}
}
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+ int ret;
+
+ /* drain the send queue as much as we can... */
+ do {
+ ret = nvme_tcp_try_send(queue);
+ } while (ret > 0);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -279,7 +289,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
if (queue->io_cpu == smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
queue->more_requests = !last;
- nvme_tcp_try_send(queue);
+ nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
} else if (last) {
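nvme_tcp_send_all() above simply keeps calling nvme_tcp_try_send() until it stops reporting progress, so the direct-send path drains everything queued rather than a single request. A sketch of that drain-until-no-progress loop; try_send() here is a hypothetical stand-in that returns >0 on progress, 0 when empty, <0 on error:

#include <stdio.h>

/* Hypothetical sender: returns >0 while it made progress, 0 when the
 * queue is empty, <0 on error (mirroring the try_send convention). */
static int try_send(int *queued)
{
	if (*queued <= 0)
		return 0;
	(*queued)--;
	return 1;
}

static void send_all(int *queued)
{
	int ret;

	/* Drain as much as possible; stop on empty (0) or error (<0). */
	do {
		ret = try_send(queued);
	} while (ret > 0);
}

int main(void)
{
	int queued = 3;

	send_all(&queued);
	printf("left in queue: %d\n", queued);	/* 0 */
	return 0;
}
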
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 733d9363900e..68213f0a052b 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1501,7 +1501,8 @@ static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int opcode, starting, amount;
+ unsigned int opcode;
+ int starting, amount;
if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
return -EBADRQC;
@@ -1588,8 +1589,8 @@ out_destroy_class:
static void __exit fcloop_exit(void)
{
- struct fcloop_lport *lport;
- struct fcloop_nport *nport;
+ struct fcloop_lport *lport = NULL;
+ struct fcloop_nport *nport = NULL;
struct fcloop_tport *tport;
struct fcloop_rport *rport;
unsigned long flags;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 5c1e7cb7fe0d..bdfc22eb2a10 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1641,6 +1641,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
spin_lock_irqsave(&queue->state_lock, flags);
switch (queue->state) {
case NVMET_RDMA_Q_CONNECTING:
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+
+ rsp = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp,
+ wait_list);
+ list_del(&rsp->wait_list);
+ nvmet_rdma_put_rsp(rsp);
+ }
+ fallthrough;
case NVMET_RDMA_Q_LIVE:
queue->state = NVMET_RDMA_Q_DISCONNECTING;
disconnect = true;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 476d7c7fe70a..f2edef0df40f 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -64,6 +64,7 @@ config DP83640_PHY
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
depends on PTP_1588_CLOCK
+ select CRC32
help
Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -78,6 +79,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_INES
tristate "ZHAW InES PTP time stamping IP core"
depends on NETWORK_PHY_TIMESTAMPING
+ depends on HAS_IOMEM
depends on PHYLIB
depends on PTP_1588_CLOCK
help
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 53fa84f4d1e1..5abdd29fb9f3 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -881,6 +881,7 @@ config REGULATOR_QCOM_RPM
config REGULATOR_QCOM_RPMH
tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST)
+ depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST)
help
This driver supports control of PMIC regulators via the RPMh hardware
block found on Qualcomm Technologies Inc. SoCs. RPMh regulator
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e6d5d98c3cea..9309765d0450 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -15,6 +15,36 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
+/* Typical regulator startup times as per data sheet in uS */
+#define BD71847_BUCK1_STARTUP_TIME 144
+#define BD71847_BUCK2_STARTUP_TIME 162
+#define BD71847_BUCK3_STARTUP_TIME 162
+#define BD71847_BUCK4_STARTUP_TIME 240
+#define BD71847_BUCK5_STARTUP_TIME 270
+#define BD71847_BUCK6_STARTUP_TIME 200
+#define BD71847_LDO1_STARTUP_TIME 440
+#define BD71847_LDO2_STARTUP_TIME 370
+#define BD71847_LDO3_STARTUP_TIME 310
+#define BD71847_LDO4_STARTUP_TIME 400
+#define BD71847_LDO5_STARTUP_TIME 530
+#define BD71847_LDO6_STARTUP_TIME 400
+
+#define BD71837_BUCK1_STARTUP_TIME 160
+#define BD71837_BUCK2_STARTUP_TIME 180
+#define BD71837_BUCK3_STARTUP_TIME 180
+#define BD71837_BUCK4_STARTUP_TIME 180
+#define BD71837_BUCK5_STARTUP_TIME 160
+#define BD71837_BUCK6_STARTUP_TIME 240
+#define BD71837_BUCK7_STARTUP_TIME 220
+#define BD71837_BUCK8_STARTUP_TIME 200
+#define BD71837_LDO1_STARTUP_TIME 440
+#define BD71837_LDO2_STARTUP_TIME 370
+#define BD71837_LDO3_STARTUP_TIME 310
+#define BD71837_LDO4_STARTUP_TIME 400
+#define BD71837_LDO5_STARTUP_TIME 310
+#define BD71837_LDO6_STARTUP_TIME 400
+#define BD71837_LDO7_STARTUP_TIME 530
+
/*
* BD718(37/47/50) have two "enable control modes". ON/OFF can either be
* controlled by software - or by PMIC internal HW state machine. Whether
@@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_buck3_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
.linear_range_selectors = bd71847_buck4_volt_range_sel,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_ldo5_volt_range_sel,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd71837_buck5_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_BUCK6_MASK,
.enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK8_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO5_MASK,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO7_MASK,
.enable_reg = BD71837_REG_LDO7_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
diff --git a/drivers/regulator/pf8x00-regulator.c b/drivers/regulator/pf8x00-regulator.c
index 308c27fa6ea8..af9918cd27aa 100644
--- a/drivers/regulator/pf8x00-regulator.c
+++ b/drivers/regulator/pf8x00-regulator.c
@@ -469,13 +469,17 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
}
static const struct of_device_id pf8x00_dt_ids[] = {
- { .compatible = "nxp,pf8x00",},
+ { .compatible = "nxp,pf8100",},
+ { .compatible = "nxp,pf8121a",},
+ { .compatible = "nxp,pf8200",},
{ }
};
MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
static const struct i2c_device_id pf8x00_i2c_id[] = {
- { "pf8x00", 0 },
+ { "pf8100", 0 },
+ { "pf8121a", 0 },
+ { "pf8200", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index fe030ec4b7db..c395a8dda6f7 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+ .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
.n_voltages = 5,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6f5ddc3eab8c..28f637042d44 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1079,7 +1079,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_set_offline(struct qeth_card *card, bool resetting);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f4b60294a969..cf18d87da41e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5507,12 +5507,12 @@ out:
return rc;
}
-static int qeth_set_online(struct qeth_card *card)
+static int qeth_set_online(struct qeth_card *card,
+ const struct qeth_discipline *disc)
{
bool carrier_ok;
int rc;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
@@ -5529,7 +5529,7 @@ static int qeth_set_online(struct qeth_card *card)
/* no need for locking / error handling at this early stage: */
qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
- rc = card->discipline->set_online(card, carrier_ok);
+ rc = disc->set_online(card, carrier_ok);
if (rc)
goto err_online;
@@ -5537,7 +5537,6 @@ static int qeth_set_online(struct qeth_card *card)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
err_online:
@@ -5552,15 +5551,14 @@ err_hardsetup:
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-int qeth_set_offline(struct qeth_card *card, bool resetting)
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting)
{
int rc, rc2, rc3;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 3, "setoffl");
@@ -5581,7 +5579,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
cancel_work_sync(&card->rx_mode_work);
- card->discipline->set_offline(card);
+ disc->set_offline(card);
qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
@@ -5602,16 +5600,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
+ const struct qeth_discipline *disc;
struct qeth_card *card = data;
int rc;
+ /* Lock-free, other users will block until we are done. */
+ disc = card->discipline;
+
QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
@@ -5619,8 +5620,8 @@ static int qeth_do_reset(void *data)
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_offline(card, true);
- rc = qeth_set_online(card);
+ qeth_set_offline(card, disc, true);
+ rc = qeth_set_online(card, disc);
if (!rc) {
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
@@ -6584,6 +6585,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
default:
card->info.layer_enforced = true;
+ /* It's so early that we don't need the discipline_mutex yet. */
rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
goto err_load;
@@ -6616,10 +6618,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
+ mutex_lock(&card->discipline_mutex);
if (card->discipline) {
card->discipline->remove(gdev);
qeth_core_free_discipline(card);
}
+ mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@@ -6634,6 +6638,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
int rc = 0;
enum qeth_discipline_id def_discipline;
+ mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
@@ -6647,16 +6652,23 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
}
}
- rc = qeth_set_online(card);
+ rc = qeth_set_online(card, card->discipline);
+
err:
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- return qeth_set_offline(card, false);
+ mutex_lock(&card->discipline_mutex);
+ rc = qeth_set_offline(card, card->discipline, false);
+ mutex_unlock(&card->discipline_mutex);
+
+ return rc;
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
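The qeth refactor above moves discipline_mutex out to the ccwgroup callbacks and has qeth_do_reset() snapshot card->discipline once, passing that pointer explicitly to the offline/online helpers so every step of a recovery uses the same discipline object. A small sketch of the snapshot-and-pass-explicitly pattern; the disc/card structs and names are illustrative only:

#include <stdio.h>

struct disc {
	const char *name;
};

struct card {
	struct disc *discipline;	/* protected by a discipline mutex */
};

/* Helpers receive the discipline explicitly instead of re-reading
 * card->discipline, so every step of a reset uses the same object. */
static void set_offline(struct card *c, const struct disc *d)
{
	(void)c;
	printf("offline via %s\n", d->name);
}

static void set_online(struct card *c, const struct disc *d)
{
	(void)c;
	printf("online via %s\n", d->name);
}

static void do_reset(struct card *c)
{
	const struct disc *d = c->discipline;	/* snapshot once */

	set_offline(c, d);
	set_online(c, d);
}

int main(void)
{
	struct disc layer2 = { "layer2" };
	struct card card = { .discipline = &layer2 };

	do_reset(&card);
	return 0;
}
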
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 4ed0fb0705a5..4254caf1d9b6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d138ac432d01..4c2cae7ae9a7 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1813,7 +1813,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- if (qeth_get_ip_version(skb) != 4)
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return qeth_features_check(skb, dev, features);
}
@@ -1971,7 +1971,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2b28dd405600..e821dd32dd28 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -14,6 +14,7 @@
#include <linux/debugfs.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/lcm.h>
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
@@ -294,6 +295,7 @@ enum {
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
+ int (*interrupt_preinit)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
@@ -393,6 +395,8 @@ struct hisi_hba {
u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
+ int *irq_map; /* v2 hw */
+
int n_phy;
spinlock_t lock;
struct semaphore sem;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index b6d4419c32f2..cf0bfac920a8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2614,6 +2614,13 @@ err_out:
return NULL;
}
+static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ if (hisi_hba->hw->interrupt_preinit)
+ return hisi_hba->hw->interrupt_preinit(hisi_hba);
+ return 0;
+}
+
int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
{
@@ -2671,6 +2678,10 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ rc = hisi_sas_interrupt_preinit(hisi_hba);
+ if (rc)
+ goto err_out_ha;
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b57177b52fac..9adfdefef9ca 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3302,6 +3302,28 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
fatal_axi_int_v2_hw
};
+#define CQ0_IRQ_INDEX (96)
+
+static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ struct platform_device *pdev = hisi_hba->platform_dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct irq_affinity desc = {
+ .pre_vectors = CQ0_IRQ_INDEX,
+ .post_vectors = 16,
+ };
+ int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec;
+
+ nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128,
+ &hisi_hba->irq_map);
+ if (nvec < 0)
+ return nvec;
+
+ shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv;
+
+ return 0;
+}
+
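The pre-init hook reserves the first 96 platform interrupts (the phy/SATA/fatal lines on this hardware) plus the last 16 as non-affinity vectors, asks for at least one extra managed vector, and turns everything granted beyond the reserved ones into hardware queues. The accounting, as a standalone sketch (helper name is illustrative, not a kernel API):

#include <stdio.h>

/* Split a granted vector count into reserved vectors and CQ (queue) vectors. */
static int cq_vectors(int granted, int pre_vectors, int post_vectors)
{
	int reserved = pre_vectors + post_vectors;

	if (granted < reserved + 1)	/* need at least one CQ vector */
		return -1;
	return granted - reserved;	/* becomes cq_nvecs / nr_hw_queues */
}

int main(void)
{
	/* 96 pre + 16 post reserved, 128 vectors granted -> 16 CQ vectors */
	printf("%d hw queues\n", cq_vectors(128, 96, 16));
	return 0;
}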
/*
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
@@ -3310,14 +3332,11 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
- int irq, rc = 0, irq_map[128];
+ int irq, rc = 0;
int i, phy_no, fatal_no, queue_no;
- for (i = 0; i < 128; i++)
- irq_map[i] = platform_get_irq(pdev, i);
-
for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
- irq = irq_map[i + 1]; /* Phy up/down is irq1 */
+ irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */
rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
@@ -3331,7 +3350,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- irq = irq_map[phy_no + 72];
+ irq = hisi_hba->irq_map[phy_no + 72];
rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
DRV_NAME " sata", phy);
if (rc) {
@@ -3343,7 +3362,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
- irq = irq_map[fatal_no + 81];
+ irq = hisi_hba->irq_map[fatal_no + 81];
rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
@@ -3354,24 +3373,22 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
- for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+ for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
- cq->irq_no = irq_map[queue_no + 96];
+ cq->irq_no = hisi_hba->irq_map[queue_no + 96];
rc = devm_request_threaded_irq(dev, cq->irq_no,
cq_interrupt_v2_hw,
cq_thread_v2_hw, IRQF_ONESHOT,
DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
- irq, rc);
+ cq->irq_no, rc);
rc = -ENOENT;
goto err_out;
}
+ cq->irq_mask = irq_get_affinity_mask(cq->irq_no);
}
-
- hisi_hba->cq_nvecs = hisi_hba->queue_count;
-
err_out:
return rc;
}
@@ -3529,6 +3546,26 @@ static struct device_attribute *host_attrs_v2_hw[] = {
NULL
};
+static int map_queues_v2_hw(struct Scsi_Host *shost)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
+ mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
+ if (!mask)
+ continue;
+
+ for_each_cpu(cpu, mask)
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
+ }
+
+ return 0;
+
+}
+
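map_queues_v2_hw() walks the affinity mask of each CQ interrupt (platform IRQ index 96 onward) and points every CPU in that mask at the matching blk-mq hardware queue. A standalone sketch of that CPU-to-queue fill, with plain arrays standing in for the blk-mq map and the affinity masks (illustrative only):

#include <stdio.h>

#define NR_CPUS		8
#define NR_QUEUES	2

int main(void)
{
	/* Pretend affinity: queue 0 serves CPUs 0-3, queue 1 serves CPUs 4-7. */
	int affinity[NR_QUEUES][NR_CPUS] = {
		{ 1, 1, 1, 1, 0, 0, 0, 0 },
		{ 0, 0, 0, 0, 1, 1, 1, 1 },
	};
	int mq_map[NR_CPUS];

	for (int queue = 0; queue < NR_QUEUES; queue++)
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (affinity[queue][cpu])
				mq_map[cpu] = queue;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> hw queue %d\n", cpu, mq_map[cpu]);
	return 0;
}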
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
@@ -3553,10 +3590,13 @@ static struct scsi_host_template sht_v2_hw = {
#endif
.shost_attrs = host_attrs_v2_hw,
.host_reset = hisi_sas_host_reset,
+ .map_queues = map_queues_v2_hw,
+ .host_tagset = 1,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
+ .interrupt_preinit = hisi_sas_v2_interrupt_preinit,
.setup_itct = setup_itct_v2_hw,
.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
.alloc_dev = alloc_dev_quirk_v2_hw,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 6e4bf05c6d77..af192096a82b 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,6 +37,7 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -113,6 +114,10 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -3119,6 +3124,19 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
+static int megasas_map_queues(struct Scsi_Host *shost)
+{
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+
+ if (shost->nr_hw_queues == 1)
+ return 0;
+
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ instance->pdev, instance->low_latency_index_start);
+}
+
static void megasas_aen_polling(struct work_struct *work);
/**
@@ -3427,6 +3445,7 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
+ .map_queues = megasas_map_queues,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
@@ -6808,6 +6827,26 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
+ /* Use the shared host tagset only for fusion adapters
+ * when managed interrupts are in use (SMP affinity enabled).
+ * Kdump runs with a single MSI-x vector, so the shared host tagset is disabled there as well.
+ */
+
+ host->host_tagset = 0;
+ host->nr_hw_queues = 1;
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ (instance->msix_vectors > instance->low_latency_index_start) &&
+ host_tagset_enable &&
+ instance->smp_affinity_enable) {
+ host->host_tagset = 1;
+ host->nr_hw_queues = instance->msix_vectors -
+ instance->low_latency_index_start;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "Max firmware commands: %d shared with nr_hw_queues = %d\n",
+ instance->max_fw_cmds, host->nr_hw_queues);
/*
* Notify the mid-layer about the new controller
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b0c01cf0428f..fd607287608e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -359,24 +359,29 @@ megasas_get_msix_index(struct megasas_instance *instance,
{
int sdev_busy;
- /* nr_hw_queue = 1 for MegaRAID */
- struct blk_mq_hw_ctx *hctx =
- scmd->device->request_queue->queue_hw_ctx[0];
-
- sdev_busy = atomic_read(&hctx->nr_active);
+ /* TBD: if the SCSI midlayer removes device_busy in the future, the driver
+ * should track this counter in an internal structure.
+ */
+ sdev_busy = atomic_read(&scmd->device->device_busy);
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
+ } else if (instance->msix_load_balance) {
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
- else
+ } else if (instance->host->nr_hw_queues > 1) {
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
+ instance->low_latency_index_start;
+ } else {
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
+ }
}
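With more than one hardware queue, the reply queue index is now derived from the request's blk-mq hardware-queue number (blk_mq_unique_tag() / blk_mq_unique_tag_to_hwq()) plus the low-latency offset, rather than from the per-CPU reply map. The unique tag packs the hardware-queue index into the upper 16 bits; a standalone sketch of that packing and the resulting MSI-x index (plain C mirror of the helpers, offset value is illustrative):

#include <stdio.h>
#include <stdint.h>

#define UNIQUE_TAG_BITS	16
#define UNIQUE_TAG_MASK	((1u << UNIQUE_TAG_BITS) - 1)

static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

static uint16_t unique_tag_to_hwq(uint32_t unique_tag)
{
	return unique_tag >> UNIQUE_TAG_BITS;
}

int main(void)
{
	uint32_t unique = make_unique_tag(3, 17);	/* hw queue 3, tag 17 */
	int low_latency_index_start = 1;		/* illustrative offset */

	/* MSI-x index used for the reply queue */
	printf("msix index = %u\n",
	       unique_tag_to_hwq(unique) + low_latency_index_start);
	return 0;
}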
/**
@@ -956,9 +961,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
- dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
- instance->max_fw_cmds);
-
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1102,8 +1104,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
- dev_info(&instance->pdev->dev, "Performance mode :%s\n",
- MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+ dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode),
+ instance->low_latency_index_start);
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 809bfff3690a..cbc4c28c1541 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -189,24 +189,26 @@ static int altera_spi_txrx(struct spi_master *master,
/* send the first byte */
altera_spi_tx_word(hw);
- } else {
- while (hw->count < hw->len) {
- altera_spi_tx_word(hw);
- for (;;) {
- altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
- if (val & ALTERA_SPI_STATUS_RRDY_MSK)
- break;
+ return 1;
+ }
+
+ while (hw->count < hw->len) {
+ altera_spi_tx_word(hw);
- cpu_relax();
- }
+ for (;;) {
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
- altera_spi_rx_word(hw);
+ cpu_relax();
}
- spi_finalize_current_transfer(master);
+
+ altera_spi_rx_word(hw);
}
+ spi_finalize_current_transfer(master);
- return t->len;
+ return 0;
}
static irqreturn_t altera_spi_irq(int irq, void *dev)
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 512e925d5ea4..881f645661cc 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -83,6 +83,7 @@ struct spi_geni_master {
spinlock_t lock;
int irq;
bool cs_flag;
+ bool abort_failed;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
- if (!time_left)
+ if (!time_left) {
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+ /*
+ * No need for a lock since SPI core has a lock and we never
+ * access this from an interrupt.
+ */
+ mas->abort_failed = true;
+ }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 m_irq, m_irq_en;
+
+ if (!mas->abort_failed)
+ return false;
+
+ /*
+ * The only known case where a transfer times out and then a cancel
+ * times out then an abort times out is if something is blocking our
+ * interrupt handler from running. Avoid starting any new transfers
+ * until that sorts itself out.
+ */
+ spin_lock_irq(&mas->lock);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+ spin_unlock_irq(&mas->lock);
+
+ if (m_irq & m_irq_en) {
+ dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+ m_irq & m_irq_en);
+ return true;
+ }
+
+ /*
+ * If we're here the problem resolved itself so no need to check more
+ * on future transfers.
+ */
+ mas->abort_failed = false;
+
+ return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (set_flag == mas->cs_flag)
return;
- mas->cs_flag = set_flag;
-
pm_runtime_get_sync(mas->dev);
+
+ if (spi_geni_is_abort_still_pending(mas)) {
+ dev_err(mas->dev, "Can't set chip select\n");
+ goto exit;
+ }
+
spin_lock_irq(&mas->lock);
+ if (mas->cur_xfer) {
+ dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+ spin_unlock_irq(&mas->lock);
+ goto exit;
+ }
+
+ mas->cs_flag = set_flag;
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
- if (!time_left)
+ if (!time_left) {
+ dev_warn(mas->dev, "Timeout setting chip select\n");
handle_fifo_timeout(spi, NULL);
+ }
+exit:
pm_runtime_put(mas->dev);
}
@@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
+ /* Stop the watermark IRQ if nothing to send */
+ if (!mas->cur_xfer) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ return false;
+ }
+
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
@@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
+
+ /* Clear out the FIFO and bail if nowhere to put it */
+ if (!mas->cur_xfer) {
+ for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+ readl(se->base + SE_GENI_RX_FIFOn);
+ return;
+ }
+
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
@@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 471dedf3d339..6017209c6d2f 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
/* align packet size with data registers access */
if (spi->cur_bpw > 8)
- fthlv -= (fthlv % 2); /* multiple of 2 */
+ fthlv += (fthlv % 2) ? 1 : 0;
else
- fthlv -= (fthlv % 4); /* multiple of 4 */
+ fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
if (!fthlv)
fthlv = 1;
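The hunk above switches the FIFO threshold (fthlv) from rounding down to rounding up to the data-register access width (a multiple of 2 bytes for words wider than 8 bits, otherwise a multiple of 4), so a short packet no longer collapses the threshold to zero before the fallback. A standalone sketch of the two roundings (helper name is illustrative, not from the driver):

#include <stdio.h>

/* Round len up to a multiple of align (align is a power of two here: 2 or 4). */
static unsigned int round_up_to(unsigned int len, unsigned int align)
{
	unsigned int rem = len % align;

	return rem ? len + (align - rem) : len;
}

int main(void)
{
	/* e.g. a 3-byte threshold with 4-byte register access */
	printf("old (round down): %u\n", 3u - (3u % 4u));	/* 0, forced to 1 by the fallback */
	printf("new (round up):   %u\n", round_up_to(3, 4));	/* 4 */
	return 0;
}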
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51d7c004fbab..720ab34784c1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1108,6 +1108,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
+ u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
@@ -1116,8 +1117,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return -EINTR;
}
} else {
+ if (!speed_hz)
+ speed_hz = 100000;
+
ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
+ do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
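spi_transfer_wait() now guards the division by assuming a slow 100 kHz clock when xfer->speed_hz is zero; the estimate is roughly 8 bits per byte times the length over the clock rate, doubled with 200 ms of slack. A standalone sketch of that estimate (illustrative only):

#include <stdio.h>

/* Rough upper bound for one transfer, mirroring the 2*t + 200ms tolerance above. */
static unsigned long long xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
{
	unsigned long long ms;

	if (!speed_hz)			/* avoid dividing by zero */
		speed_hz = 100000;	/* assume a slow 100 kHz bus */

	ms = 8ULL * 1000ULL * len / speed_hz;
	return ms + ms + 200;		/* some tolerance */
}

int main(void)
{
	printf("%llu ms\n", xfer_timeout_ms(4096, 0));	/* falls back to 100 kHz */
	return 0;
}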
@@ -3378,8 +3382,9 @@ int spi_setup(struct spi_device *spi)
if (status)
return status;
- if (!spi->max_speed_hz ||
- spi->max_speed_hz > spi->controller->max_speed_hz)
+ if (spi->controller->max_speed_hz &&
+ (!spi->max_speed_hz ||
+ spi->max_speed_hz > spi->controller->max_speed_hz))
spi->max_speed_hz = spi->controller->max_speed_hz;
mutex_lock(&spi->controller->io_mutex);
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index d99231c737fb..80d74cce2a01 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2987,7 +2987,9 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
v32.chanlist_len = cmd->chanlist_len;
v32.data = ptr_to_compat(cmd->data);
v32.data_len = cmd->data_len;
- return copy_to_user(cmd32, &v32, sizeof(v32));
+ if (copy_to_user(cmd32, &v32, sizeof(v32)))
+ return -EFAULT;
+ return 0;
}
/* Handle 32-bit COMEDI_CMD ioctl. */
diff --git a/drivers/staging/hikey9xx/hisi-spmi-controller.c b/drivers/staging/hikey9xx/hisi-spmi-controller.c
index 861aedd5de48..0d42bc65f39b 100644
--- a/drivers/staging/hikey9xx/hisi-spmi-controller.c
+++ b/drivers/staging/hikey9xx/hisi-spmi-controller.c
@@ -278,21 +278,24 @@ static int spmi_controller_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(&pdev->dev, "can not get resource!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_controller;
}
spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
resource_size(iores));
if (!spmi_controller->base) {
dev_err(&pdev->dev, "can not remap base addr!\n");
- return -EADDRNOTAVAIL;
+ ret = -EADDRNOTAVAIL;
+ goto err_put_controller;
}
ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_controller;
}
platform_set_drvdata(pdev, spmi_controller);
@@ -309,9 +312,15 @@ static int spmi_controller_probe(struct platform_device *pdev)
ctrl->write_cmd = spmi_write_cmd;
ret = spmi_controller_add(ctrl);
- if (ret)
- dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
+ if (ret) {
+ dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
+ goto err_put_controller;
+ }
+
+ return 0;
+err_put_controller:
+ spmi_controller_put(ctrl);
return ret;
}
@@ -320,7 +329,7 @@ static int spmi_del_controller(struct platform_device *pdev)
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
spmi_controller_remove(ctrl);
- kfree(ctrl);
+ spmi_controller_put(ctrl);
return 0;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 52b9fb18c87f..b666cb23e5ca 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -1062,26 +1062,6 @@ static const struct v4l2_ctrl_config ctrl_select_isp_version = {
.def = 0,
};
-#if 0 /* #ifdef CONFIG_ION */
-/*
- * Control for ISP ion device fd
- *
- * userspace will open ion device and pass the fd to kernel.
- * this fd will be used to map shared fd to buffer.
- */
-/* V4L2_CID_ATOMISP_ION_DEVICE_FD is not defined */
-static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Ion Device Fd",
- .min = -1,
- .max = 1024,
- .step = 1,
- .def = ION_FD_UNSET
-};
-#endif
-
static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
{
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index d241349214e7..bc4bb4374313 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -712,7 +712,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret) {
dev_err(&pdev->dev, "failed to register dma device\n");
- return ret;
+ goto err_uninit_hsdma;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -728,6 +728,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_uninit_hsdma:
+ mtk_hsdma_uninit(hsdma);
return ret;
}
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 47a6e42f0d04..e15cd6b5bb99 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -401,6 +401,20 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config NULL_TTY
+ tristate "NULL TTY driver"
+ help
+ Say Y here if you want a NULL TTY which simply discards messages.
+
+ This is useful to allow userspace applications which expect a console
+ device to work without modifications even when no console is
+ available or desired.
+
+ In order to use this driver, you should redirect the console to this
+ TTY, or boot the kernel with console=ttynull.
+
+ If unsure, say N.
+
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 3c1c5a9240a7..b3ccae932660 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
tty_buffer.o tty_port.o tty_mutex.o \
tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
- n_null.o ttynull.o
+ n_null.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-$(CONFIG_AUDIT) += tty_audit.o
@@ -25,6 +25,7 @@ obj-$(CONFIG_ISI) += isicom.o
obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
obj-$(CONFIG_NOZOMI) += nozomi.o
+obj-$(CONFIG_NULL_TTY) += ttynull.o
obj-$(CONFIG_ROCKETPORT) += rocket.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
diff --git a/drivers/tty/ttynull.c b/drivers/tty/ttynull.c
index eced70ec54e1..17f05b7eb6d3 100644
--- a/drivers/tty/ttynull.c
+++ b/drivers/tty/ttynull.c
@@ -2,13 +2,6 @@
/*
* Copyright (C) 2019 Axis Communications AB
*
- * The console is useful for userspace applications which expect a console
- * device to work without modifications even when no console is available
- * or desired.
- *
- * In order to use this driver, you should redirect the console to this
- * TTY, or boot the kernel with console=ttynull.
- *
* Based on ttyprintk.c:
* Copyright (C) 2010 Samo Pogacnik
*/
@@ -66,17 +59,6 @@ static struct console ttynull_console = {
.device = ttynull_device,
};
-void __init register_ttynull_console(void)
-{
- if (!ttynull_driver)
- return;
-
- if (add_preferred_console(ttynull_console.name, 0, NULL))
- return;
-
- register_console(&ttynull_console);
-}
-
static int __init ttynull_init(void)
{
struct tty_driver *driver;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 9e12152ea46b..8b7bc10b6e8b 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
- if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+ if (!misc_pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(misc_pdev)) {
+ put_device(&misc_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
data->dev = &misc_pdev->dev;
/*
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f52f1bc0559f..781905745812 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1895,6 +1895,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x04d8, 0xfd08),
.driver_info = IGNORE_DEVICE,
},
+
+ { USB_DEVICE(0x04d8, 0xf58b),
+ .driver_info = IGNORE_DEVICE,
+ },
#endif
/*Samsung phone in firmware update mode */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 02d0cfd23bb2..508b1c3f8b73 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
if (!desc->resp_count || !--desc->resp_count)
goto out;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ rv = -ENODEV;
+ goto out;
+ }
+ if (test_bit(WDM_RESETTING, &desc->flags)) {
+ rv = -EIO;
+ goto out;
+ }
+
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
- dev_err(&desc->intf->dev,
- "usb_submit_urb failed with result %d\n", rv);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
/* make sure the next notification trigger a submit */
clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 67cbd42421be..134dc2005ce9 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
#define usblp_reset(usblp)\
usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kzalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+ channel, buf, 1);
+ if (ret == 0)
+ *new_channel = buf[0];
+
+ kfree(buf);
+
+ return ret;
+}
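The macro becomes a function so the one-byte transfer buffer comes from the heap instead of the caller's stack; USB control transfers need DMA-able memory, which on-stack buffers are not. The allocate / transfer / copy-out / free pattern, as a generic sketch with the transfer itself stubbed out (names are illustrative):

#include <stdlib.h>
#include <string.h>

/* Stand-in for the actual control transfer; fills buf with one status byte. */
static int do_transfer(unsigned char *buf, size_t len)
{
	memset(buf, 0x42, len);
	return 0;
}

static int read_channel(unsigned char *new_channel)
{
	unsigned char *buf;
	int ret;

	buf = calloc(1, 1);		/* heap buffer, safe to hand to DMA */
	if (!buf)
		return -1;		/* -ENOMEM in the driver */

	ret = do_transfer(buf, 1);
	if (ret == 0)
		*new_channel = buf[0];	/* copy the result out before freeing */

	free(buf);
	return ret;
}

int main(void)
{
	unsigned char ch;

	return read_channel(&ch);
}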
/*
* See the description for usblp_select_alts() below for the usage
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 60886a7464c3..ad5a0f405a75 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
urb->status = status;
/*
* This function can be called in task context inside another remote
- * coverage collection section, but KCOV doesn't support that kind of
+ * coverage collection section, but kcov doesn't support that kind of
* recursion yet. Only collect coverage in softirq context for now.
*/
- if (in_serving_softirq())
- kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+ kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- if (in_serving_softirq())
- kcov_remote_stop();
+ kcov_remote_stop_softirq();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2f95f08ca511..1b241f937d8f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -285,6 +285,7 @@
/* Global USB2 PHY Vendor Control Register */
#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_DONE BIT(24)
#define DWC3_GUSB2PHYACC_BUSY BIT(23)
#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 417e05381b5d..bdf1f98dfad8 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
- return ret;
+ goto err_disable_clks;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 78cb4db8a6e4..ee44321fee38 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1763,6 +1763,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
list_for_each_entry_safe(r, t, &dep->started_list, list)
dwc3_gadget_move_cancelled_request(r);
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
goto out;
}
}
@@ -2083,6 +2085,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
@@ -2145,6 +2148,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
dwc->ev_buf->length;
}
+ } else {
+ __dwc3_gadget_start(dwc);
}
ret = dwc3_gadget_run_stop(dwc, is_on, false);
@@ -2319,10 +2324,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc->gadget_driver = driver;
-
- if (pm_runtime_active(dwc->dev))
- __dwc3_gadget_start(dwc);
-
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
@@ -2348,13 +2349,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
-
- if (pm_runtime_suspended(dwc->dev))
- goto out;
-
- __dwc3_gadget_stop(dwc);
-
-out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index aa213c9815f6..f23f4c9a557e 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -7,6 +7,8 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/delay.h>
+#include <linux/time64.h>
#include <linux/ulpi/regs.h>
#include "core.h"
@@ -17,14 +19,28 @@
DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
{
- unsigned int count = 1000;
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+ unsigned int count = 10000;
u32 reg;
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ if (read)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+ usleep_range(1000, 1200);
+
while (count--) {
+ ndelay(ns);
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
- if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+ if (reg & DWC3_GUSB2PHYACC_DONE)
return 0;
cpu_relax();
}
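The busy loop now paces itself on the 60 MHz ULPI clock: DWC3_ULPI_BASE_DELAY is one clock period in nanoseconds, a register access takes about five periods, with one extra period for extended addresses and one more for reads. A standalone arithmetic sketch of that delay (values mirror the macros above; not driver code):

#include <stdio.h>
#include <stdbool.h>

#define NSEC_PER_SEC		1000000000L
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
/* one ULPI clock period at 60 MHz, rounded up: 17 ns */
#define ULPI_BASE_DELAY		DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)

static long ulpi_access_ns(bool extended_addr, bool read)
{
	long ns = 5L * ULPI_BASE_DELAY;

	if (extended_addr)
		ns += ULPI_BASE_DELAY;
	if (read)
		ns += ULPI_BASE_DELAY;
	return ns;
}

int main(void)
{
	printf("read, normal addr: %ld ns\n", ulpi_access_ns(false, true));
	return 0;
}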
@@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
u32 reg;
int ret;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- ret = dwc3_ulpi_busyloop(dwc);
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
if (ret)
return ret;
@@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
reg |= DWC3_GUSB2PHYACC_WRITE | val;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- return dwc3_ulpi_busyloop(dwc);
+ return dwc3_ulpi_busyloop(dwc, addr, false);
}
static const struct ulpi_ops dwc3_ulpi_ops = {
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7e47e6223089..2d152571a7de 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
depends on NET
select USB_U_ETHER
select USB_F_NCM
+ select CRC32
help
NCM is an advanced protocol for Ethernet encapsulation, allows
grouping of several ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
depends on NET
select USB_U_ETHER
select USB_F_EEM
+ select CRC32
help
CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
and therefore can be supported by more hardware. Technically ECM and
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index c6d455f2bb92..1a556a628971 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -392,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function)
spin_lock_irqsave(&cdev->lock, flags);
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_deactivate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
if (status == 0)
cdev->deactivations++;
@@ -424,8 +427,11 @@ int usb_function_activate(struct usb_function *function)
status = -EINVAL;
else {
cdev->deactivations--;
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_activate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
}
spin_unlock_irqrestore(&cdev->lock, flags);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 56051bb97349..36ffb43f9c1a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+ struct gadget_info *gi = to_gadget_info(item);
+ char *udc_name;
+ int ret;
+
+ mutex_lock(&gi->lock);
+ udc_name = gi->composite.gadget_driver.udc_name;
+ ret = sprintf(page, "%s\n", udc_name ?: "");
+ mutex_unlock(&gi->lock);
- return sprintf(page, "%s\n", udc_name ?: "");
+ return ret;
}
static int unregister_gadget(struct gadget_info *gi)
@@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
cfg = container_of(c, struct config_usb_cfg, c);
- list_for_each_entry_safe(f, tmp, &c->functions, list) {
+ list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
- list_move_tail(&f->list, &cfg->func_list);
+ list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
"unbind function '%s'/%p\n",
@@ -1536,7 +1543,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
.suspend = configfs_composite_suspend,
.resume = configfs_composite_resume,
- .max_speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
@@ -1576,7 +1583,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER;
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 64a4112068fc..2f1eb2e81d30 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1162,6 +1162,7 @@ fail_tx_reqs:
printer_req_free(dev->in_ep, req);
}
+ usb_free_all_descriptors(f);
return ret;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 3633df6d7610..5d960b6603b6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
- unsigned int factor, bool is_playback)
+ enum usb_device_speed speed, bool is_playback)
{
int chmask, srate, ssize;
- u16 max_packet_size;
+ u16 max_size_bw, max_size_ep;
+ unsigned int factor;
+
+ switch (speed) {
+ case USB_SPEED_FULL:
+ max_size_ep = 1023;
+ factor = 1000;
+ break;
+
+ case USB_SPEED_HIGH:
+ max_size_ep = 1024;
+ factor = 8000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
if (is_playback) {
chmask = uac2_opts->p_chmask;
@@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- max_packet_size = num_channels(chmask) * ssize *
+ max_size_bw = num_channels(chmask) * ssize *
DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
- ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
- le16_to_cpu(ep_desc->wMaxPacketSize)));
+ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+ max_size_ep));
+
+ return 0;
}
/* Use macro to overcome line length limitation */
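set_ep_max_packet_size() now derives both the per-speed endpoint limit (1023 bytes at full speed, 1024 at high speed) and the per-interval bandwidth from the channel count, sample size and sample rate, and clamps to the smaller of the two. A standalone sketch of the bandwidth term, passing the channel count directly instead of deriving it from a channel mask (names are illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Bytes needed per (micro)frame: channels * sample_size * samples_per_interval. */
static unsigned int uac2_bytes_per_interval(unsigned int channels,
					    unsigned int ssize,   /* bytes per sample */
					    unsigned int srate,   /* Hz */
					    unsigned int factor,  /* 1000 FS, 8000 HS */
					    unsigned int bInterval)
{
	return channels * ssize *
	       DIV_ROUND_UP(srate, factor / (1 << (bInterval - 1)));
}

int main(void)
{
	/* stereo, 16-bit, 48 kHz at high speed with bInterval = 4 */
	unsigned int bw = uac2_bytes_per_interval(2, 2, 48000, 8000, 4);
	unsigned int max_ep = 1024;

	printf("wMaxPacketSize = %u\n", bw < max_ep ? bw : max_ep);	/* 192 */
	return 0;
}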
@@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
/* Calculate wMaxPacketSize according to audio bandwidth */
- set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
- set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
- set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
- set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
if (EPOUT_EN(uac2_opts)) {
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 31ea76adcc0d..c019f2b0c0af 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -45,9 +45,10 @@
#define UETH__VERSION "29-May-2008"
/* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
* blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
struct eth_dev {
/* lock is held while accessing port_usb
@@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname)
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
return net;
}
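The rename separates the MTU limit (payload) from the full Ethernet frame: the frame adds ETH_HLEN (14 bytes) of header on top of the MTU, so advertising the old 15412-byte value as max_mtu overstated what a single frame allocation can hold. The relation, as a tiny sketch:

#include <stdio.h>

#define ETH_HLEN			14	/* Ethernet header: dst + src + type */
#define GETHER_MAX_MTU_SIZE		15412
#define GETHER_MAX_ETH_FRAME_LEN	(GETHER_MAX_MTU_SIZE + ETH_HLEN)

int main(void)
{
	printf("max MTU   : %d\n", GETHER_MAX_MTU_SIZE);
	printf("max frame : %d\n", GETHER_MAX_ETH_FRAME_LEN);	/* 15426 */
	return 0;
}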
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index 59be2d8417c9..e8033e5f0c18 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail_string_ids;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1a12aab208b4..8c614bb86c66 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -90,7 +90,7 @@ config USB_BCM63XX_UDC
config USB_FSL_USB2
tristate "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC
help
Some of Freescale PowerPC and i.MX processors have a High Speed
Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index f5a7ce28aecd..a21f2224e7eb 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
-fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 5b5cfeb6c14a..6a62bbd01324 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -659,8 +659,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
*
* Enables the D+ (or potentially D-) pullup. The host will start
* enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered). This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
+ * is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ab5e978b5052..1a953f44183a 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2118,9 +2118,21 @@ static int dummy_hub_control(
dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
set_link_state(dum_hcd);
break;
- default:
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
+ break;
+ default:
+ /* Disallow INDICATOR and C_OVER_CURRENT */
+ goto error;
}
break;
case GetHubDescriptor:
@@ -2281,18 +2293,17 @@ static int dummy_hub_control(
*/
dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3, and ignored for USB-2 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ break;
default:
- if (hcd->speed == HCD_USB3) {
- if ((dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
- } else
- if ((dum_hcd->port_status &
- USB_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
- set_link_state(dum_hcd);
+ /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+ goto error;
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
deleted file mode 100644
index 5a321992decc..000000000000
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2009
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Description:
- * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
- * driver to function correctly on these systems.
- */
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fsl_devices.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "fsl_usb2_udc.h"
-
-static struct clk *mxc_ahb_clk;
-static struct clk *mxc_per_clk;
-static struct clk *mxc_ipg_clk;
-
-/* workaround ENGcm09152 for i.MX35 */
-#define MX35_USBPHYCTRL_OFFSET 0x600
-#define USBPHYCTRL_OTGBASE_OFFSET 0x8
-#define USBPHYCTRL_EVDO (1 << 23)
-
-int fsl_udc_clk_init(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata;
- unsigned long freq;
- int ret;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(mxc_ipg_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n");
- return PTR_ERR(mxc_ipg_clk);
- }
-
- mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(mxc_ahb_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n");
- return PTR_ERR(mxc_ahb_clk);
- }
-
- mxc_per_clk = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(mxc_per_clk)) {
- dev_err(&pdev->dev, "clk_get(\"per\") failed\n");
- return PTR_ERR(mxc_per_clk);
- }
-
- clk_prepare_enable(mxc_ipg_clk);
- clk_prepare_enable(mxc_ahb_clk);
- clk_prepare_enable(mxc_per_clk);
-
- /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
- if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
- freq = clk_get_rate(mxc_per_clk);
- if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
- (freq < 59999000 || freq > 60001000)) {
- dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
- ret = -EINVAL;
- goto eclkrate;
- }
- }
-
- return 0;
-
-eclkrate:
- clk_disable_unprepare(mxc_ipg_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- return ret;
-}
-
-int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
-
- /* workaround ENGcm09152 for i.MX35 */
- if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
- unsigned int v;
- struct resource *res = platform_get_resource
- (pdev, IORESOURCE_MEM, 0);
- void __iomem *phy_regs = ioremap(res->start +
- MX35_USBPHYCTRL_OFFSET, 512);
- if (!phy_regs) {
- dev_err(&pdev->dev, "ioremap for phy address fails\n");
- ret = -EINVAL;
- goto ioremap_err;
- }
-
- v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
- writel(v | USBPHYCTRL_EVDO,
- phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-
- iounmap(phy_regs);
- }
-
-
-ioremap_err:
- /* ULPI transceivers don't need usbpll */
- if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- }
-
- return ret;
-}
-
-void fsl_udc_clk_release(void)
-{
- if (mxc_per_clk)
- clk_disable_unprepare(mxc_per_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_ipg_clk);
-}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 91ab81c3fc79..e86940571b4c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4770,19 +4770,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ else
+ timeout_ns = udev->u1_params.sel;
+
/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
- else
- timeout_ns = udev->u1_params.sel;
-
/* The U1 timeout is encoded in 1us intervals.
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
*/
@@ -4834,19 +4834,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ else
+ timeout_ns = udev->u2_params.sel;
+
/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
- else
- timeout_ns = udev->u2_params.sel;
-
/* The U2 timeout is encoded in 256us intervals */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
/* If the necessary timeout value is bigger than what we can set in the
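Both hunks compute the exit-latency timeout first and compare the endpoint's service interval against that timeout (rather than against the MEL value), so periodic endpoints whose ESIT is shorter than the actual U1/U2 timeout keep LPM disabled; the U2 value is then encoded in 256 µs units. A standalone sketch of the comparison and encoding (illustrative values, simplified to the non-Intel path):

#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))
#define USB3_LPM_DISABLED	0x0

static unsigned int u2_timeout_code(unsigned long long timeout_ns,
				    unsigned long long esit_ns,
				    int periodic)
{
	/* Periodic endpoints must not enter U2 if the exit latency exceeds the ESIT. */
	if (periodic && esit_ns <= timeout_ns)
		return USB3_LPM_DISABLED;

	/* The U2 timeout field is encoded in 256 us intervals. */
	return (unsigned int)DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
}

int main(void)
{
	/* 1 ms SEL, 125 us service interval -> U2 stays disabled */
	printf("%u\n", u2_timeout_code(1000000ULL, 125000ULL, 1));
	/* bulk endpoint (not periodic): 1 ms -> code 4 */
	printf("%u\n", u2_timeout_code(1000000ULL, 0, 0));
	return 0;
}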
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 73ebfa6e9715..c640f98d20c5 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
+ /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+ usb_kill_urb(dev->cntl_urb);
+
mutex_unlock(&dev->io_mutex);
if (retval < 0) {
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index f1201d4de297..e8f06b41a503 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -532,23 +532,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
struct device *dev = &port->dev;
int i;
int status;
- u8 rxcmd = IUU_UART_RX;
+ u8 *rxcmd;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
+ rxcmd = kmalloc(1, GFP_KERNEL);
+ if (!rxcmd)
+ return -ENOMEM;
+
+ rxcmd[0] = IUU_UART_RX;
+
for (i = 0; i < 2; i++) {
- status = bulk_immediate(port, &rxcmd, 1);
+ status = bulk_immediate(port, rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
- return status;
+ goto out_free;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
if (priv->len > 0) {
@@ -556,12 +562,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+ kfree(rxcmd);
+
return status;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2c21e34235bb..3fe959104311 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1117,6 +1117,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -2057,6 +2059,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 870e9cf3d5dc..f9677a5ec31b 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -91,6 +91,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
US_FL_BROKEN_FUA),
/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
"PNY",
"Pro Elite SSD",
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 187690fd1a5b..60d375e9c3c7 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
to enable support for VirtualLink devices with NVIDIA GPUs.
To compile this driver as a module, choose M here: the
- module will be called typec_displayport.
+ module will be called typec_nvidia.
endmenu
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebfd3113a9a8..8f77669f9cf4 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -766,6 +766,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod
return ret;
sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -923,6 +924,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
return ret;
sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);
return 0;
}
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index cf37a59ce130..46a25b8db72e 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -207,10 +207,21 @@ static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
u8 msg[2] = { };
+ int ret;
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+ /* Configure HPD first if HPD,IRQ comes together */
+ if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+ dp->status & DP_STATUS_IRQ_HPD &&
+ dp->status & DP_STATUS_HPD_STATE) {
+ msg[1] = PMC_USB_DP_HPD_LVL;
+ ret = pmc_usb_command(port, msg, sizeof(msg));
+ if (ret)
+ return ret;
+ }
+
if (dp->status & DP_STATUS_IRQ_HPD)
msg[1] = PMC_USB_DP_HPD_IRQ;
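
The hunk above makes the mux send an HPD level assertion to the PMC before the HPD IRQ when both arrive in a single DisplayPort status update. A standalone sketch of that ordering decision, with the PMC command stubbed out (the helper names and flag values below are placeholders for illustration, not the driver's real protocol):

/* Simplified illustration of the "level before IRQ" ordering above.
 * send_hpd() stands in for pmc_usb_command(); the remainder of the
 * driver's HPD handling is reduced to the two trailing branches.
 */
#include <stdbool.h>
#include <stdio.h>

enum { HPD_LVL = 1, HPD_IRQ = 2 };

static int send_hpd(int what)
{
	printf("PMC message: %s\n", what == HPD_LVL ? "HPD level" : "HPD IRQ");
	return 0;
}

static int handle_dp_status(bool hpd_already_asserted, bool irq, bool level)
{
	int ret;

	/* If the sink raised HPD and signalled an IRQ in the same
	 * status update, the level change must reach the PMC first. */
	if (!hpd_already_asserted && irq && level) {
		ret = send_hpd(HPD_LVL);
		if (ret)
			return ret;
	}

	if (irq)
		return send_hpd(HPD_IRQ);
	if (level)
		return send_hpd(HPD_LVL);
	return 0;
}

int main(void)
{
	return handle_dp_status(false, true, true);
}
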
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 66cde5e5f796..3209b5ddd30c 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
+ if (wValue >= 32)
+ goto error;
vhci_hcd->port_status[rhport] &= ~(1 << wValue);
break;
}
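
The added bound check matters because port_status is a 32-bit field: shifting 1 by a wValue of 32 or more is undefined behaviour in C and cannot name a valid feature bit, so an unchecked value from the hub request must be rejected. A tiny standalone sketch of the same guard (the helper below is hypothetical, not vhci code):

/* Why the wValue bound check is needed: a shift count >= the width of
 * the status word is undefined behaviour, so reject it up front.
 */
#include <stdint.h>
#include <stdio.h>

static int clear_port_feature(uint32_t *port_status, unsigned int wValue)
{
	if (wValue >= 32)	/* out-of-range feature number */
		return -1;
	*port_status &= ~(UINT32_C(1) << wValue);
	return 0;
}

int main(void)
{
	uint32_t status = 0xffffffff;

	if (clear_port_feature(&status, 40))
		printf("rejected out-of-range feature\n");
	clear_port_feature(&status, 4);
	printf("status now 0x%08x\n", status);
	return 0;
}
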
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 531a00d703cd..c8784dfafdd7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -863,6 +863,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *ubufs;
+ struct ubuf_info *ubuf;
bool zcopy_used;
int sent_pkts = 0;
@@ -895,9 +896,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
- struct ubuf_info *ubuf;
ubuf = nvq->ubuf_info + nvq->upend_idx;
-
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
@@ -927,7 +926,8 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- vhost_net_ubuf_put(ubufs);
+ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
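
The vhost-net change drops the ubuf reference on a failed sendmsg() only while the descriptor is still marked VHOST_DMA_IN_PROGRESS; the idea is that if the zerocopy completion callback has already run, it has already released that reference, and putting it again would underflow the count. A generic standalone sketch of this "release only if still pending" pattern (plain counters stand in for the vhost structures; this is not the driver's code):

/* Generic sketch of the double-put the hunk above avoids. */
#include <stdio.h>

enum { IN_PROGRESS = -1, DONE = 0 };

struct ref {
	int count;
};

static void ref_put(struct ref *r)
{
	r->count--;
}

/* completion path: marks the slot done and drops its reference */
static void complete(struct ref *r, int *state)
{
	*state = DONE;
	ref_put(r);
}

/* error path: only undo the reference if completion has not run yet */
static void send_failed(struct ref *r, int *state)
{
	if (*state == IN_PROGRESS)
		ref_put(r);
}

int main(void)
{
	struct ref r = { .count = 1 };
	int state = IN_PROGRESS;

	complete(&r, &state);		/* callback fired before the error was seen */
	send_failed(&r, &state);	/* must not drop the count a second time */
	printf("refcount: %d\n", r.count);	/* stays at 0, not -1 */
	return 0;
}
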
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a483cec31d5c..5e78fb719602 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -30,7 +30,12 @@
#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
- VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+ VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
/* Used to track all the vhost_vsock instances on the system. */
@@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
@@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&vsock->dev, vq);
do {
u32 len;
@@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
mutex_lock(&vsock->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&vsock->dev)) {
- mutex_unlock(&vsock->dev.mutex);
- return -EFAULT;
+ goto err;
+ }
+
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+ if (vhost_init_device_iotlb(&vsock->dev, true))
+ goto err;
}
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
}
mutex_unlock(&vsock->dev.mutex);
return 0;
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
return vhost_vsock_set_features(vsock, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VSOCK_BACKEND_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&vsock->dev, features);
+ return 0;
default:
mutex_lock(&vsock->dev.mutex);
r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
}
}
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
@@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .read_iter = vhost_vsock_chr_read_iter,
+ .write_iter = vhost_vsock_chr_write_iter,
+ .poll = vhost_vsock_chr_poll,
};
static struct miscdevice vhost_vsock_misc = {
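
With VIRTIO_F_ACCESS_PLATFORM and VHOST_BACKEND_F_IOTLB_MSG_V2 wired up, a VMM negotiates IOTLB support through the regular vhost ioctls and then services misses via the device fd that the new read/write/poll hooks expose. A minimal userspace sketch of that negotiation, with error handling trimmed and the memory-table/vring setup omitted (the flow below is an assumption of this illustration, not code from the patch):

/* Negotiate vhost-vsock backend features and wait for an IOTLB message
 * on the device fd.  VHOST_SET_MEM_TABLE, vring setup and the reply
 * path (VHOST_IOTLB_MSG_V2 updates written back to the fd) are omitted.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
	uint64_t features = 0;
	int fd = open("/dev/vhost-vsock", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VHOST_SET_OWNER))
		return 1;

	/* Ask which backend features the device offers ... */
	if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features))
		return 1;

	/* ... and accept IOTLB_MSG_V2 if it is advertised. */
	if (features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
		uint64_t want = 1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;

		if (ioctl(fd, VHOST_SET_BACKEND_FEATURES, &want))
			return 1;
	}

	/* Once VIRTIO_F_ACCESS_PLATFORM is negotiated, IOTLB miss
	 * messages arrive on the device fd as struct vhost_msg_v2. */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	struct vhost_msg_v2 msg;

	if (poll(&pfd, 1, 0) > 0 && read(fd, &msg, sizeof(msg)) == sizeof(msg))
		printf("IOTLB message, type %u\n", msg.type);

	close(fd);
	return 0;
}
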