author     David S. Miller <davem@davemloft.net>    2016-03-08 12:34:12 -0500
committer  David S. Miller <davem@davemloft.net>    2016-03-08 12:34:12 -0500
commit     810813c47a564416f6306ae214e2661366c987a7 (patch)
tree       f0d1f856d4b0024324f642fe519963248828b83f /drivers
parent     d66ab51442211158b677c2f12310c314d9587f74 (diff)
parent     e2857b8f11a289ed2b61d18d0665e05c1053c446 (diff)
download   linux-810813c47a564416f6306ae214e2661366c987a7.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several cases of overlapping changes, as well as one instance (vxlan) of a
bug fix in 'net' overlapping with code movement in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/nfit.c | 105
-rw-r--r--  drivers/acpi/pci_irq.c | 17
-rw-r--r--  drivers/acpi/pci_link.c | 128
-rw-r--r--  drivers/android/binder.c | 2
-rw-r--r--  drivers/ata/ahci.c | 49
-rw-r--r--  drivers/ata/ahci.h | 5
-rw-r--r--  drivers/ata/ahci_xgene.c | 85
-rw-r--r--  drivers/ata/libahci.c | 63
-rw-r--r--  drivers/ata/libata-scsi.c | 11
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 11
-rw-r--r--  drivers/char/random.c | 22
-rw-r--r--  drivers/clk/ti/dpll3xxx.c | 3
-rw-r--r--  drivers/cpufreq/Kconfig | 1
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 4
-rw-r--r--  drivers/cpufreq/mt8173-cpufreq.c | 1
-rw-r--r--  drivers/devfreq/tegra-devfreq.c | 2
-rw-r--r--  drivers/dma/pxa_dma.c | 8
-rw-r--r--  drivers/gpio/gpio-rcar.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 112
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 153
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 13
-rw-r--r--  drivers/gpu/host1x/bus.c | 2
-rw-r--r--  drivers/gpu/host1x/dev.c | 7
-rw-r--r--  drivers/gpu/host1x/dev.h | 1
-rw-r--r--  drivers/i2c/busses/i2c-brcmstb.c | 3
-rw-r--r--  drivers/infiniband/core/device.c | 1
-rw-r--r--  drivers/infiniband/core/sa_query.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 41
-rw-r--r--  drivers/iommu/amd_iommu.c | 4
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 63
-rw-r--r--  drivers/iommu/dmar.c | 5
-rw-r--r--  drivers/iommu/intel-iommu.c | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 18
-rw-r--r--  drivers/md/dm.c | 2
-rw-r--r--  drivers/media/i2c/adp1653.c | 2
-rw-r--r--  drivers/media/i2c/adv7604.c | 3
-rw-r--r--  drivers/media/usb/au0828/au0828-video.c | 3
-rw-r--r--  drivers/misc/cxl/pci.c | 2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 2
-rw-r--r--  drivers/mtd/ubi/upd.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 2
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 24
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 2
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 1
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 36
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 9
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 9
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 20
-rw-r--r--  drivers/net/ethernet/ethoc.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 37
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 15
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 5
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 62
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 4
-rw-r--r--  drivers/net/ethernet/jme.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 4
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 18
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 4
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 9
-rw-r--r--  drivers/net/ethernet/synopsys/dwc_eth_qos.c | 45
-rw-r--r--  drivers/net/phy/micrel.c | 28
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 11
-rw-r--r--  drivers/net/usb/ax88172a.c | 1
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 26
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 7
-rw-r--r--  drivers/net/usb/usbnet.c | 7
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 73
-rw-r--r--  drivers/net/vrf.c | 13
-rw-r--r--  drivers/net/vxlan.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 9
-rw-r--r--  drivers/nvdimm/bus.c | 20
-rw-r--r--  drivers/nvdimm/pmem.c | 2
-rw-r--r--  drivers/nvme/host/core.c | 111
-rw-r--r--  drivers/nvme/host/nvme.h | 8
-rw-r--r--  drivers/nvme/host/pci.c | 149
-rw-r--r--  drivers/of/of_mdio.c | 1
-rw-r--r--  drivers/pci/host/Kconfig | 1
-rw-r--r--  drivers/pci/host/pci-keystone-dw.c | 11
-rw-r--r--  drivers/pci/host/pci-layerscape.c | 21
-rw-r--r--  drivers/pci/xen-pcifront.c | 10
-rw-r--r--  drivers/power/bq27xxx_battery_i2c.c | 37
-rw-r--r--  drivers/scsi/ipr.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 1
-rw-r--r--  drivers/sh/pm_runtime.c | 2
-rw-r--r--  drivers/ssb/Kconfig | 1
-rw-r--r--  drivers/staging/media/davinci_vpfe/vpfe_video.c | 2
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_pci.c | 4
-rw-r--r--  drivers/usb/chipidea/debug.c | 3
-rw-r--r--  drivers/usb/chipidea/otg.c | 2
-rw-r--r--  drivers/usb/core/hub.c | 8
-rw-r--r--  drivers/usb/dwc2/Kconfig | 1
-rw-r--r--  drivers/usb/dwc2/core.c | 6
-rw-r--r--  drivers/usb/dwc2/hcd_ddma.c | 23
-rw-r--r--  drivers/usb/dwc2/hcd_intr.c | 8
-rw-r--r--  drivers/usb/dwc3/core.h | 1
-rw-r--r--  drivers/usb/dwc3/ep0.c | 5
-rw-r--r--  drivers/usb/dwc3/gadget.c | 70
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 7
-rw-r--r--  drivers/usb/gadget/udc/fsl_qe_udc.c | 2
-rw-r--r--  drivers/usb/gadget/udc/net2280.h | 15
-rw-r--r--  drivers/usb/gadget/udc/udc-core.c | 3
-rw-r--r--  drivers/usb/musb/musb_host.c | 8
-rw-r--r--  drivers/usb/phy/phy-msm-usb.c | 20
-rw-r--r--  drivers/usb/serial/Kconfig | 16
-rw-r--r--  drivers/usb/serial/Makefile | 1
-rw-r--r--  drivers/usb/serial/cp210x.c | 3
-rw-r--r--  drivers/usb/serial/mxu11x0.c | 1006
-rw-r--r--  drivers/usb/serial/option.c | 14
-rw-r--r--  drivers/usb/serial/qcserial.c | 7
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 9
-rw-r--r--  drivers/vfio/platform/vfio_platform_common.c | 9
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 6
-rw-r--r--  drivers/vhost/vhost.c | 15
-rw-r--r--  drivers/video/console/fbcon.c | 2
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 2
-rw-r--r--  drivers/watchdog/Kconfig | 11
-rw-r--r--  drivers/watchdog/Makefile | 1
-rw-r--r--  drivers/watchdog/sun4v_wdt.c | 191
-rw-r--r--  drivers/xen/xen-pciback/pciback_ops.c | 9
-rw-r--r--  drivers/xen/xen-scsiback.c | 80
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 2
185 files changed, 2204 insertions, 2008 deletions
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index ad6d8c6b777e..35947ac87644 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -469,37 +469,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
nfit_mem->bdw = NULL;
}
-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
struct nfit_memdev *nfit_memdev;
struct nfit_flush *nfit_flush;
- struct nfit_dcr *nfit_dcr;
struct nfit_bdw *nfit_bdw;
struct nfit_idt *nfit_idt;
u16 idt_idx, range_index;
- list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
- if (nfit_dcr->dcr->region_index != dcr)
- continue;
- nfit_mem->dcr = nfit_dcr->dcr;
- break;
- }
-
- if (!nfit_mem->dcr) {
- dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
- spa->range_index, __to_nfit_memdev(nfit_mem)
- ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
- return -ENODEV;
- }
-
- /*
- * We've found enough to create an nvdimm, optionally
- * find an associated BDW
- */
- list_add(&nfit_mem->list, &acpi_desc->dimms);
-
list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
if (nfit_bdw->bdw->region_index != dcr)
continue;
@@ -508,12 +487,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
}
if (!nfit_mem->bdw)
- return 0;
+ return;
nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
if (!nfit_mem->spa_bdw)
- return 0;
+ return;
range_index = nfit_mem->spa_bdw->range_index;
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -538,8 +517,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
}
break;
}
-
- return 0;
}
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -548,7 +525,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, *found;
struct nfit_memdev *nfit_memdev;
int type = nfit_spa_type(spa);
- u16 dcr;
switch (type) {
case NFIT_SPA_DCR:
@@ -559,14 +535,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
}
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
- int rc;
+ struct nfit_dcr *nfit_dcr;
+ u32 device_handle;
+ u16 dcr;
if (nfit_memdev->memdev->range_index != spa->range_index)
continue;
found = NULL;
dcr = nfit_memdev->memdev->region_index;
+ device_handle = nfit_memdev->memdev->device_handle;
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
- if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
+ if (__to_nfit_memdev(nfit_mem)->device_handle
+ == device_handle) {
found = nfit_mem;
break;
}
@@ -579,6 +559,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
if (!nfit_mem)
return -ENOMEM;
INIT_LIST_HEAD(&nfit_mem->list);
+ list_add(&nfit_mem->list, &acpi_desc->dimms);
+ }
+
+ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+ if (nfit_dcr->dcr->region_index != dcr)
+ continue;
+ /*
+ * Record the control region for the dimm. For
+ * the ACPI 6.1 case, where there are separate
+ * control regions for the pmem vs blk
+ * interfaces, be sure to record the extended
+ * blk details.
+ */
+ if (!nfit_mem->dcr)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ else if (nfit_mem->dcr->windows == 0
+ && nfit_dcr->dcr->windows)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ break;
+ }
+
+ if (dcr && !nfit_mem->dcr) {
+ dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
+ spa->range_index, dcr);
+ return -ENODEV;
}
if (type == NFIT_SPA_DCR) {
@@ -595,6 +600,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
nfit_mem->idt_dcr = nfit_idt->idt;
break;
}
+ nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
} else {
/*
* A single dimm may belong to multiple SPA-PM
@@ -603,13 +609,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
*/
nfit_mem->memdev_pmem = nfit_memdev->memdev;
}
-
- if (found)
- continue;
-
- rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
- if (rc)
- return rc;
}
return 0;
@@ -1504,9 +1503,7 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
case 1:
/* ARS unsupported, but we should never get here */
return 0;
- case 2:
- return -EINVAL;
- case 3:
+ case 6:
/* ARS is in progress */
msleep(1000);
break;
@@ -1517,13 +1514,13 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
}
static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
- struct nd_cmd_ars_status *cmd)
+ struct nd_cmd_ars_status *cmd, u32 size)
{
int rc;
while (1) {
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
- sizeof(*cmd));
+ size);
if (rc || cmd->status & 0xffff)
return -ENXIO;
@@ -1538,6 +1535,8 @@ static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
case 2:
/* No ARS performed for the current boot */
return 0;
+ case 3:
+ /* TODO: error list overflow support */
default:
return -ENXIO;
}
@@ -1581,6 +1580,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
struct nd_cmd_ars_start *ars_start = NULL;
struct nd_cmd_ars_cap *ars_cap = NULL;
u64 start, len, cur, remaining;
+ u32 ars_status_size;
int rc;
ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
@@ -1590,14 +1590,21 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
start = ndr_desc->res->start;
len = ndr_desc->res->end - ndr_desc->res->start + 1;
+ /*
+ * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
+ * Scrub' flag in extended status is not set, skip this but continue
+ * initialization
+ */
rc = ars_get_cap(nd_desc, ars_cap, start, len);
+ if (rc == -ENOTTY) {
+ dev_dbg(acpi_desc->dev,
+ "Address Range Scrub is not implemented, won't create an error list\n");
+ rc = 0;
+ goto out;
+ }
if (rc)
goto out;
- /*
- * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
- * extended status is not set, skip this but continue initialization
- */
if ((ars_cap->status & 0xffff) ||
!(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
dev_warn(acpi_desc->dev,
@@ -1610,14 +1617,14 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
* Check if a full-range ARS has been run. If so, use those results
* without having to start a new ARS.
*/
- ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status),
- GFP_KERNEL);
+ ars_status_size = ars_cap->max_ars_out;
+ ars_status = kzalloc(ars_status_size, GFP_KERNEL);
if (!ars_status) {
rc = -ENOMEM;
goto out;
}
- rc = ars_get_status(nd_desc, ars_status);
+ rc = ars_get_status(nd_desc, ars_status, ars_status_size);
if (rc)
goto out;
@@ -1647,7 +1654,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
if (rc)
goto out;
- rc = ars_get_status(nd_desc, ars_status);
+ rc = ars_get_status(nd_desc, ars_status, ars_status_size);
if (rc)
goto out;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index d30184c7f3bc..c8e169e46673 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -406,7 +406,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
entry = acpi_pci_irq_lookup(dev, pin);
@@ -451,7 +451,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
kfree(entry);
return rc;
}
- pci_set_managed_irq(dev, rc);
+ dev->irq = rc;
+ dev->irq_managed = 1;
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -474,9 +475,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
u8 pin;
pin = dev->pin;
- if (!pin || !pci_has_managed_irq(dev))
+ if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
+ /* Keep IOAPIC pin configuration when suspending */
+ if (dev->dev.power.is_prepared)
+ return;
+#ifdef CONFIG_PM
+ if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+ return;
+#endif
+
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
return;
@@ -496,6 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- pci_reset_managed_irq(dev);
+ dev->irq_managed = 0;
}
}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index fa2863567eed..ededa909df2f 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -4,7 +4,6 @@
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -438,6 +437,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
* enabled system.
*/
+#define ACPI_MAX_IRQS 256
#define ACPI_MAX_ISA_IRQ 16
#define PIRQ_PENALTY_PCI_AVAILABLE (0)
@@ -447,7 +447,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
-static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
+static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
@@ -464,68 +464,9 @@ static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */
+ /* >IRQ15 */
};
-struct irq_penalty_info {
- int irq;
- int penalty;
- struct list_head node;
-};
-
-static LIST_HEAD(acpi_irq_penalty_list);
-
-static int acpi_irq_get_penalty(int irq)
-{
- struct irq_penalty_info *irq_info;
-
- if (irq < ACPI_MAX_ISA_IRQ)
- return acpi_irq_isa_penalty[irq];
-
- list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
- if (irq_info->irq == irq)
- return irq_info->penalty;
- }
-
- return 0;
-}
-
-static int acpi_irq_set_penalty(int irq, int new_penalty)
-{
- struct irq_penalty_info *irq_info;
-
- /* see if this is a ISA IRQ */
- if (irq < ACPI_MAX_ISA_IRQ) {
- acpi_irq_isa_penalty[irq] = new_penalty;
- return 0;
- }
-
- /* next, try to locate from the dynamic list */
- list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
- if (irq_info->irq == irq) {
- irq_info->penalty = new_penalty;
- return 0;
- }
- }
-
- /* nope, let's allocate a slot for this IRQ */
- irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL);
- if (!irq_info)
- return -ENOMEM;
-
- irq_info->irq = irq;
- irq_info->penalty = new_penalty;
- list_add_tail(&irq_info->node, &acpi_irq_penalty_list);
-
- return 0;
-}
-
-static void acpi_irq_add_penalty(int irq, int penalty)
-{
- int curpen = acpi_irq_get_penalty(irq);
-
- acpi_irq_set_penalty(irq, curpen + penalty);
-}
-
int __init acpi_irq_penalty_init(void)
{
struct acpi_pci_link *link;
@@ -546,16 +487,15 @@ int __init acpi_irq_penalty_init(void)
link->irq.possible_count;
for (i = 0; i < link->irq.possible_count; i++) {
- if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) {
- int irqpos = link->irq.possible[i];
-
- acpi_irq_add_penalty(irqpos, penalty);
- }
+ if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
+ acpi_irq_penalty[link->irq.
+ possible[i]] +=
+ penalty;
}
} else if (link->irq.active) {
- acpi_irq_add_penalty(link->irq.active,
- PIRQ_PENALTY_PCI_POSSIBLE);
+ acpi_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_POSSIBLE;
}
}
@@ -607,12 +547,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
* the use of IRQs 9, 10, 11, and >15.
*/
for (i = (link->irq.possible_count - 1); i >= 0; i--) {
- if (acpi_irq_get_penalty(irq) >
- acpi_irq_get_penalty(link->irq.possible[i]))
+ if (acpi_irq_penalty[irq] >
+ acpi_irq_penalty[link->irq.possible[i]])
irq = link->irq.possible[i];
}
}
- if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
+ if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
"Try pci=noacpi or acpi=off\n",
acpi_device_name(link->device),
@@ -628,8 +568,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device));
return -ENODEV;
} else {
- acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING);
-
+ acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -839,7 +778,7 @@ static void acpi_pci_link_remove(struct acpi_device *device)
}
/*
- * modify penalty from cmdline
+ * modify acpi_irq_penalty[] from cmdline
*/
static int __init acpi_irq_penalty_update(char *str, int used)
{
@@ -857,10 +796,13 @@ static int __init acpi_irq_penalty_update(char *str, int used)
if (irq < 0)
continue;
+ if (irq >= ARRAY_SIZE(acpi_irq_penalty))
+ continue;
+
if (used)
- acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED);
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
else
- acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE);
+ acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
if (retval != 2) /* no next number */
break;
@@ -877,15 +819,18 @@ static int __init acpi_irq_penalty_update(char *str, int used)
*/
void acpi_penalize_isa_irq(int irq, int active)
{
- if (irq >= 0)
- acpi_irq_add_penalty(irq, active ?
- PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (active)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
}
bool acpi_isa_irq_available(int irq)
{
- return irq >= 0 &&
- (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
+ return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+ acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
}
/*
@@ -895,18 +840,13 @@ bool acpi_isa_irq_available(int irq)
*/
void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
{
- int penalty;
-
- if (irq < 0)
- return;
-
- if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
- polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
- penalty = PIRQ_PENALTY_ISA_ALWAYS;
- else
- penalty = PIRQ_PENALTY_PCI_USING;
-
- acpi_irq_add_penalty(irq, penalty);
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
+ polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
}
/*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a39e85f9efa9..7d00b7a015ea 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(cookie);
list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
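
This one-liner matters on 32-bit kernels: the binder command stream encodes the cookie as a fixed-width binder_uintptr_t (normally __u64 in the UAPI), so advancing the parse pointer by sizeof(void *) under-reads by four bytes there. A minimal standalone sketch of the size mismatch, with the typedef mocked up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the UAPI typedef; in the kernel headers it is normally __u64. */
typedef uint64_t binder_uintptr_t;

int main(void)
{
	binder_uintptr_t cookie = 0;

	/* On a 32-bit build the old increment advances by 4 bytes... */
	printf("sizeof(void *) = %zu\n", sizeof(void *));
	/* ...while the field just consumed is always 8 bytes wide. */
	printf("sizeof(cookie) = %zu\n", sizeof(cookie));
	return 0;
}
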
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 546a3692774f..146dc0b8ec61 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1325,6 +1331,44 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif
+#ifdef CONFIG_ARM64
+/*
+ * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
+ * Workaround is to make sure all pending IRQs are served before leaving
+ * handler.
+ */
+static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ahci_host_priv *hpriv;
+ unsigned int rc = 0;
+ void __iomem *mmio;
+ u32 irq_stat, irq_masked;
+ unsigned int handled = 1;
+
+ VPRINTK("ENTER\n");
+ hpriv = host->private_data;
+ mmio = hpriv->mmio;
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ if (!irq_stat)
+ return IRQ_NONE;
+
+ do {
+ irq_masked = irq_stat & hpriv->port_map;
+ spin_lock(&host->lock);
+ rc = ahci_handle_port_intr(host, irq_masked);
+ if (!rc)
+ handled = 0;
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ spin_unlock(&host->lock);
+ } while (irq_stat);
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+#endif
+
/*
* ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
* to single msi.
@@ -1560,6 +1604,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+#ifdef CONFIG_ARM64
+ if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
+ hpriv->irq_handler = ahci_thunderx_irq_handler;
+#endif
+
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index a44c75d4c284..167ba7e3b92e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -240,8 +240,7 @@ enum {
error-handling stage) */
AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
- AHCI_HFLAG_EDGE_IRQ = (1 << 19), /* HOST_IRQ_STAT behaves as
- Edge Triggered */
+
#ifdef CONFIG_PCI_MSI
AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
@@ -361,6 +360,7 @@ struct ahci_host_priv {
* be overridden anytime before the host is activated.
*/
void (*start_engine)(struct ata_port *ap);
+ irqreturn_t (*irq_handler)(int irq, void *dev_instance);
};
#ifdef CONFIG_PCI_MSI
@@ -424,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
void ahci_print_info(struct ata_host *host, const char *scc_s);
int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
void ahci_error_handler(struct ata_port *ap);
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index e2c6d9e0c5ac..8e3f7faf00d3 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -548,6 +548,88 @@ softreset_retry:
return rc;
}
+/**
+ * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
+ * @ata_host: Host that recieved the irq
+ * @irq_masked: HOST_IRQ_STAT value
+ *
+ * For hardware with broken edge trigger latch
+ * the HOST_IRQ_STAT register misses the edge interrupt
+ * when clearing of HOST_IRQ_STAT register and hardware
+ * reporting the PORT_IRQ_STAT register at the
+ * same clock cycle.
+ * As such, the algorithm below outlines the workaround.
+ *
+ * 1. Read HOST_IRQ_STAT register and save the state.
+ * 2. Clear the HOST_IRQ_STAT register.
+ * 3. Read back the HOST_IRQ_STAT register.
+ * 4. If HOST_IRQ_STAT register equals to zero, then
+ * traverse the rest of port's PORT_IRQ_STAT register
+ * to check if an interrupt is triggered at that point else
+ * go to step 6.
+ * 5. If PORT_IRQ_STAT register of rest ports is not equal to zero
+ * then update the state of HOST_IRQ_STAT saved in step 1.
+ * 6. Handle port interrupts.
+ * 7. Exit
+ */
+static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
+ u32 irq_masked)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *port_mmio;
+ int i;
+
+ if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
+ for (i = 0; i < host->n_ports; i++) {
+ if (irq_masked & (1 << i))
+ continue;
+
+ port_mmio = ahci_port_base(host->ports[i]);
+ if (readl(port_mmio + PORT_IRQ_STAT))
+ irq_masked |= (1 << i);
+ }
+ }
+
+ return ahci_handle_port_intr(host, irq_masked);
+}
+
+static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ahci_host_priv *hpriv;
+ unsigned int rc = 0;
+ void __iomem *mmio;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ hpriv = host->private_data;
+ mmio = hpriv->mmio;
+
+ /* sigh. 0xffffffff is a valid return from h/w */
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ if (!irq_stat)
+ return IRQ_NONE;
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ spin_lock(&host->lock);
+
+ /*
+ * HOST_IRQ_STAT behaves as edge triggered latch meaning that
+ * it should be cleared before all the port events are cleared.
+ */
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(rc);
+}
+
static struct ata_port_operations xgene_ahci_v1_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
@@ -779,7 +861,8 @@ skip_clk_phy:
hpriv->flags = AHCI_HFLAG_NO_NCQ;
break;
case XGENE_AHCI_V2:
- hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ;
+ hpriv->flags |= AHCI_HFLAG_YES_FBS;
+ hpriv->irq_handler = xgene_ahci_irq_intr;
break;
default:
break;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 402967902cbe..85ea5142a095 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
const char *buf, size_t size);
static ssize_t ahci_show_em_supported(struct device *dev,
struct device_attribute *attr, char *buf);
+static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
if (!hpriv->start_engine)
hpriv->start_engine = ahci_start_engine;
+
+ if (!hpriv->irq_handler)
+ hpriv->irq_handler = ahci_single_level_irq_intr;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);
@@ -1164,8 +1168,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
- if ((tmp & PORT_CMD_HPCP) ||
- ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
+ if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
ap->pflags |= ATA_PFLAG_EXTERNAL;
}
@@ -1846,7 +1849,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
{
unsigned int i, handled = 0;
@@ -1872,43 +1875,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
return handled;
}
-
-static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct ahci_host_priv *hpriv;
- unsigned int rc = 0;
- void __iomem *mmio;
- u32 irq_stat, irq_masked;
-
- VPRINTK("ENTER\n");
-
- hpriv = host->private_data;
- mmio = hpriv->mmio;
-
- /* sigh. 0xffffffff is a valid return from h/w */
- irq_stat = readl(mmio + HOST_IRQ_STAT);
- if (!irq_stat)
- return IRQ_NONE;
-
- irq_masked = irq_stat & hpriv->port_map;
-
- spin_lock(&host->lock);
-
- /*
- * HOST_IRQ_STAT behaves as edge triggered latch meaning that
- * it should be cleared before all the port events are cleared.
- */
- writel(irq_stat, mmio + HOST_IRQ_STAT);
-
- rc = ahci_handle_port_intr(host, irq_masked);
-
- spin_unlock(&host->lock);
-
- VPRINTK("EXIT\n");
-
- return IRQ_RETVAL(rc);
-}
+EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
{
@@ -2535,14 +2502,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
int irq = hpriv->irq;
int rc;
- if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX))
+ if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+ if (hpriv->irq_handler)
+ dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set \
+ and custom irq handler implemented\n");
+
rc = ahci_host_activate_multi_irqs(host, sht);
- else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ)
- rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr,
- IRQF_SHARED, sht);
- else
- rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
+ } else {
+ rc = ata_host_activate(host, irq, hpriv->irq_handler,
IRQF_SHARED, sht);
+ }
+
+
return rc;
}
EXPORT_SYMBOL_GPL(ahci_host_activate);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7e959f90c020..e417e1a1d02c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
int cmd, void __user *arg)
{
- int val = -EINVAL, rc = -EINVAL;
+ unsigned long val;
+ int rc = -EINVAL;
unsigned long flags;
switch (cmd) {
- case ATA_IOC_GET_IO32:
+ case HDIO_GET_32BIT:
spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags);
- if (copy_to_user(arg, &val, 1))
- return -EFAULT;
- return 0;
+ return put_user(val, (unsigned long __user *)arg);
- case ATA_IOC_SET_IO32:
+ case HDIO_SET_32BIT:
val = (unsigned long) arg;
rc = 0;
spin_lock_irqsave(ap->lock, flags);
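
The libata-scsi hunk handles the 32-bit PIO ioctls under their canonical HDIO_GET_32BIT/HDIO_SET_32BIT names and returns the full value via put_user() on an unsigned long rather than copying a single byte. A minimal userspace sketch of the request this path serves (assuming a libata-backed disk at /dev/sda and permission to open it):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	long val = 0;	/* HDIO_GET_32BIT fills in a long, hence the full-width copy */
	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	if (ioctl(fd, HDIO_GET_32BIT, &val) == 0)
		printf("32-bit I/O setting: %ld\n", val);
	close(fd);
	return 0;
}
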
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 12fe0f3bb7e9..c8b6a780a290 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -32,6 +32,8 @@
#include <linux/libata.h>
#include <scsi/scsi_host.h>
+#include <asm/mach-rc32434/rb.h>
+
#define DRV_NAME "pata-rb532-cf"
#define DRV_VERSION "0.1.0"
#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
@@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
int gpio;
struct resource *res;
struct ata_host *ah;
+ struct cf_device *pdata;
struct rb532_cf_info *info;
int ret;
@@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return -ENOENT;
}
- gpio = irq_to_gpio(irq);
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ return -EINVAL;
+ }
+
+ gpio = pdata->gpio_pin;
if (gpio < 0) {
dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
return -ENOENT;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d0da5d852d41..b583e5336630 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1819,6 +1819,28 @@ unsigned int get_random_int(void)
EXPORT_SYMBOL(get_random_int);
/*
+ * Same as get_random_int(), but returns unsigned long.
+ */
+unsigned long get_random_long(void)
+{
+ __u32 *hash;
+ unsigned long ret;
+
+ if (arch_get_random_long(&ret))
+ return ret;
+
+ hash = get_cpu_var(get_random_int_hash);
+
+ hash[0] += current->pid + jiffies + random_get_entropy();
+ md5_transform(hash, random_int_secret);
+ ret = *(unsigned long *)hash;
+ put_cpu_var(get_random_int_hash);
+
+ return ret;
+}
+EXPORT_SYMBOL(get_random_long);
+
+/*
* randomize_range() returns a start address such that
*
* [...... <range> .....]
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 1c300388782b..cc739291a3ce 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -460,7 +460,8 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
parent = clk_hw_get_parent(hw);
- if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) {
+ if (clk_hw_get_rate(hw) ==
+ clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
r = _omap3_noncore_dpll_bypass(clk);
} else {
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 659879a56dba..f93511031177 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -296,6 +296,7 @@ endif
config QORIQ_CPUFREQ
tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
+ depends on !CPU_THERMAL || THERMAL
select CLK_QORIQ
help
This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0031069b64c9..14b1f9393b05 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,10 +84,10 @@ config ARM_KIRKWOOD_CPUFREQ
SoCs.
config ARM_MT8173_CPUFREQ
- bool "Mediatek MT8173 CPUFreq support"
+ tristate "Mediatek MT8173 CPUFreq support"
depends on ARCH_MEDIATEK && REGULATOR
depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
- depends on !CPU_THERMAL || THERMAL=y
+ depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This adds the CPUFreq driver support for Mediatek MT8173 SoC.
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 1efba340456d..2058e6d292ce 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -17,6 +17,7 @@
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 848b93ee930f..fe9dce0245bf 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -500,6 +500,8 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
clk_set_min_rate(tegra->emc_clock, rate);
clk_set_rate(tegra->emc_clock, 0);
+ *freq = rate;
+
return 0;
}
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index f2a0310ae771..debca824bed6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
(PXA_DCMD_LENGTH & sizeof(u32));
if (flags & DMA_PREP_INTERRUPT)
updater->dcmd |= PXA_DCMD_ENDIRQEN;
+ if (sw_desc->cyclic)
+ sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}
static bool is_desc_completed(struct virt_dma_desc *vd)
@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
dev_dbg(&chan->vc.chan.dev->device,
"%s(): checking txd %p[%x]: completed=%d\n",
__func__, vd, vd->tx.cookie, is_desc_completed(vd));
+ if (to_pxad_sw_desc(vd)->cyclic) {
+ vchan_cyclic_callback(vd);
+ break;
+ }
if (is_desc_completed(vd)) {
list_del(&vd->node);
vchan_cookie_complete(vd);
@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
return NULL;
pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
- dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len);
+ dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
dev_dbg(&chan->vc.chan.dev->device,
"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
__func__, (unsigned long)buf_addr, len, period_len, dir, flags);
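
The pxa_dma change in the cyclic path turns a bitwise OR into the AND that a field mask requires: ORing PXA_DCMD_LENGTH in sets every length bit regardless of period_len. A toy standalone illustration; the mask and IRQ-enable constants below are assumptions sketched from the PXA DCMD register layout, not taken from this diff:

#include <stdio.h>
#include <stdint.h>

#define PXA_DCMD_LENGTH   0x1fffu	/* assumed length-field mask */
#define PXA_DCMD_ENDIRQEN (1u << 21)	/* assumed end-of-descriptor IRQ enable */

int main(void)
{
	uint32_t period_len = 256;
	uint32_t fixed = PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	uint32_t buggy = PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len);

	printf("fixed length bits: 0x%x\n", fixed & PXA_DCMD_LENGTH);	/* 0x100 */
	printf("buggy length bits: 0x%x\n", buggy & PXA_DCMD_LENGTH);	/* 0x1fff */
	return 0;
}
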
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index cf41440aff91..d9ab0cd1d205 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,6 +196,44 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
+static void gpio_rcar_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+
+ pm_runtime_get_sync(&p->pdev->dev);
+}
+
+static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+
+ pm_runtime_put(&p->pdev->dev);
+}
+
+
+static int gpio_rcar_irq_request_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+ int error;
+
+ error = pm_runtime_get_sync(&p->pdev->dev);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static void gpio_rcar_irq_release_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+
+ pm_runtime_put(&p->pdev->dev);
+}
+
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
@@ -450,6 +488,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
+ irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
+ irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
+ irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
+ irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add_data(gpio_chip, p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 89c3dd62ba21..119cdc2c43e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't try to start link training before we
* have the dpcd */
- if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return;
/* set it to OFF so that drm_helper_connector_dpms()
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index acd066d0a805..8297bc319369 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
- unsigned i;
- int vpos, hpos, stat, min_udelay;
+ unsigned i, repcnt = 4;
+ int vpos, hpos, stat, min_udelay = 0;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (amdgpuCrtc->enabled && repcnt--) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */
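
For scale, the cap that the new repcnt/framedur check enforces in the flip-delay loop above can be worked through with typical 1080p@60 timings; the numbers below are illustrative, not taken from the commit:

#include <stdio.h>

int main(void)
{
	int linedur_ns = 14800, framedur_ns = 16666667, hpos = -3;
	int linedur_us = linedur_ns / 1000 > 5 ? linedur_ns / 1000 : 5;
	int min_udelay = (-hpos + 1) * linedur_us;	/* 4 * 14 = 56 us */

	/* The loop now gives up once the estimate exceeds half a frame. */
	printf("min_udelay = %d us (cap = %d us)\n",
	       min_udelay, framedur_ns / 2000);
	return 0;
}
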
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7380f782cd14..d20c2a8929cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -596,7 +596,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
break;
}
ttm_eu_backoff_reservation(&ticket, &list);
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
+ !amdgpu_vm_debug)
amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7d8d84eaea4a..95a4a25d8df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -113,6 +113,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+
if (adev->pp_enabled) {
enum amd_dpm_forced_level level;
@@ -140,6 +144,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
enum amdgpu_dpm_forced_level level;
int ret = 0;
+ /* Can't force performance level when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMDGPU_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -157,6 +166,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
+ mutex_unlock(&adev->pm.mutex);
goto fail;
}
ret = amdgpu_dpm_force_performance_level(adev, level);
@@ -167,8 +177,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
mutex_unlock(&adev->pm.mutex);
}
fail:
- mutex_unlock(&adev->pm.mutex);
-
return count;
}
@@ -182,8 +190,14 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ struct drm_device *ddev = adev->ddev;
int temp;
+ /* Can't get temperature when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
temp = 0;
else
@@ -634,11 +648,6 @@ force:
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
/* wait for the rings to drain */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -655,6 +664,12 @@ force:
amdgpu_dpm_post_set_power_state(adev);
+ /* update displays */
+ amdgpu_dpm_display_configuration_changed(adev);
+
+ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
if (adev->pm.funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -847,12 +862,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ struct drm_device *ddev = adev->ddev;
if (!adev->pm.dpm_enabled) {
seq_printf(m, "dpm not enabled\n");
return 0;
}
- if (adev->pp_enabled) {
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+ seq_printf(m, "PX asic powered off\n");
+ } else if (adev->pp_enabled) {
amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
} else {
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index b9d0d55f6b47..3cb6d6c413c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled)
+ if (adev->pp_enabled) {
amdgpu_pm_sysfs_init(adev);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
+ }
#endif
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 9056355309d1..e7ef2261ff4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
AMD_PG_STATE_GATE);
cz_enable_vce_dpm(adev, false);
- /* TODO: to figure out why vce can't be poweroff. */
- /* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
+ cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
pi->vce_power_gated = true;
} else {
cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
} else { /*pi->caps_vce_pg*/
cz_update_vce_dpm(adev);
- cz_enable_vce_dpm(adev, true);
+ cz_enable_vce_dpm(adev, !gate);
}
-
- return;
}
const struct amd_ip_funcs cz_dpm_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 7732059ae30f..06602df707f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, 4); /* poll interval */
+
if (usepfp) {
/* synce CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 8f8ec37ecd88..7086ac17abee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4809,7 +4809,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
- WAIT_REG_MEM_FUNCTION(3))); /* equal */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
@@ -4995,7 +4996,7 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
case AMDGPU_IRQ_STATE_ENABLE:
cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 0);
+ PRIV_REG_INT_ENABLE, 1);
WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
default:
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index aa67244a77ae..589599f66fcc 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -402,8 +402,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
data.requested_ui_label = power_state_convert(ps);
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+ break;
}
- break;
+ case AMD_PP_EVENT_COMPLETE_INIT:
+ ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 83be3cf210e0..6b52c78cb404 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
};
static const pem_event_action *complete_init_event[] = {
+ unblock_adjust_power_state_tasks,
adjust_power_state_tasks,
enable_gfx_clock_gating_tasks,
enable_gfx_voltage_island_power_gating_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 52a3efc97f05..46410e3c7349 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -31,7 +31,7 @@
static int pem_init(struct pp_eventmgr *eventmgr)
{
int result = 0;
- struct pem_event_data event_data;
+ struct pem_event_data event_data = { {0} };
/* Initialize PowerPlay feature info */
pem_init_feature_info(eventmgr);
@@ -52,7 +52,7 @@ static int pem_init(struct pp_eventmgr *eventmgr)
static void pem_fini(struct pp_eventmgr *eventmgr)
{
- struct pem_event_data event_data;
+ struct pem_event_data event_data = { {0} };
pem_uninit_featureInfo(eventmgr);
pem_unregister_interrupts(eventmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index ad7700822a1c..ff08ce41bde9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
}
} else {
cz_dpm_update_vce_dpm(hwmgr);
- cz_enable_disable_vce_dpm(hwmgr, true);
+ cz_enable_disable_vce_dpm(hwmgr, !bgate);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 9759009d1da3..b1480acbb3c3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
- if (data & 0x400)
+ if (data & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0fc38bb7276c..cf39ed3133d6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -825,8 +825,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(pipe))) {
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -840,6 +843,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Pipe %c IER:\t%08x\n",
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IER(pipe)));
+
+ intel_display_power_put(dev_priv, power_domain);
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -3985,6 +3990,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
pipe));
+ enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -3995,7 +4001,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (pipe_crc->source && source)
return -EINVAL;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -4012,7 +4019,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
if (ret != 0)
- return ret;
+ goto out;
/* none -> real source transition */
if (source) {
@@ -4024,8 +4031,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
sizeof(pipe_crc->entries[0]),
GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
+ if (!entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4081,7 +4090,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
hsw_enable_ips(crtc);
}
- return 0;
+ ret = 0;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e7cd311e9fbb..b0847b915545 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -751,6 +751,7 @@ struct intel_csr {
uint32_t mmio_count;
i915_reg_t mmioaddr[8];
uint32_t mmiodata[8];
+ uint32_t dc_state;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9c89df1af036..a7b4a524fadd 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(crt->adpa_reg);
if (!(tmp & ADPA_DAC_ENABLE))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9bb63a85997a..647d85e77c2f 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -240,6 +240,8 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
I915_WRITE(dev_priv->csr.mmioaddr[i],
dev_priv->csr.mmiodata[i]);
}
+
+ dev_priv->csr.dc_state = 0;
}
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 54a165b9c92d..0f3df2c39f7c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1969,13 +1969,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
enum transcoder cpu_transcoder;
enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(intel_encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
- return false;
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
+ ret = false;
+ goto out;
+ }
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
@@ -1987,23 +1990,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
case TRANS_DDI_MODE_SELECT_DVI:
- return (type == DRM_MODE_CONNECTOR_HDMIA);
+ ret = type == DRM_MODE_CONNECTOR_HDMIA;
+ break;
case TRANS_DDI_MODE_SELECT_DP_SST:
- if (type == DRM_MODE_CONNECTOR_eDP)
- return true;
- return (type == DRM_MODE_CONNECTOR_DisplayPort);
+ ret = type == DRM_MODE_CONNECTOR_eDP ||
+ type == DRM_MODE_CONNECTOR_DisplayPort;
+ break;
+
case TRANS_DDI_MODE_SELECT_DP_MST:
/* if the transcoder is in MST state then
* connector isn't connected */
- return false;
+ ret = false;
+ break;
case TRANS_DDI_MODE_SELECT_FDI:
- return (type == DRM_MODE_CONNECTOR_VGA);
+ ret = type == DRM_MODE_CONNECTOR_VGA;
+ break;
default:
- return false;
+ ret = false;
+ break;
}
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2015,15 +2028,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum intel_display_power_domain power_domain;
u32 tmp;
int i;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
- return false;
+ goto out;
if (port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -2041,25 +2057,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
break;
}
- return true;
- } else {
- for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+ ret = true;
- if ((tmp & TRANS_DDI_PORT_MASK)
- == TRANS_DDI_SELECT_PORT(port)) {
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
- return false;
+ goto out;
+ }
- *pipe = i;
- return true;
- }
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+ TRANS_DDI_MODE_SELECT_DP_MST)
+ goto out;
+
+ *pipe = i;
+ ret = true;
+
+ goto out;
}
}
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
- return false;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2508,12 +2531,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(WRPLL_CTL(pll->id));
hw_state->wrpll = val;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & WRPLL_PLL_ENABLE;
}
@@ -2523,12 +2548,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & SPLL_PLL_ENABLE;
}
@@ -2645,16 +2672,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
uint32_t val;
unsigned int dpll;
const struct skl_dpll_regs *regs = skl_dpll_regs;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ ret = false;
+
/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
dpll = pll->id + 1;
val = I915_READ(regs[pll->id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
- return false;
+ goto out;
val = I915_READ(DPLL_CTRL1);
hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2664,8 +2694,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
}
+ ret = true;
- return true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
}
static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2932,13 +2966,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
{
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ ret = false;
+
val = I915_READ(BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
- return false;
+ goto out;
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2985,7 +3022,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
}
static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3120,11 +3162,15 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
{
u32 temp;
- if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
return true;
}
+
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5feb65725c04..46947fffd599 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1351,18 +1351,21 @@ void assert_pipe(struct drm_i915_private *dev_priv,
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
+ enum intel_display_power_domain power_domain;
/* if we need the pipe quirk it must be always on */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
- cur_state = false;
- } else {
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
+
+ intel_display_power_put(dev_priv, power_domain);
+ } else {
+ cur_state = false;
}
I915_STATE_WARN(cur_state != state,
@@ -8171,18 +8174,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ ret = false;
+
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
switch (tmp & PIPECONF_BPC_MASK) {
@@ -8262,7 +8269,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->base.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void ironlake_init_pch_refclk(struct drm_device *dev)
@@ -9366,18 +9378,21 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
@@ -9440,7 +9455,12 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_pfit_config(crtc, pipe_config);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -9950,12 +9970,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum intel_display_power_domain pfit_domain;
+ enum intel_display_power_domain power_domain;
+ unsigned long power_domain_mask;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ power_domain_mask = BIT(power_domain);
+
+ ret = false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -9982,13 +10007,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
- return false;
+ power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ goto out;
+ power_domain_mask |= BIT(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
haswell_get_ddi_port_state(crtc, pipe_config);
@@ -9998,14 +10024,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
skl_init_scalers(dev, crtc, pipe_config);
}
- pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-
if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
- if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+ power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ power_domain_mask |= BIT(power_domain);
if (INTEL_INFO(dev)->gen >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
@@ -10023,7 +10049,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- return true;
+ ret = true;
+
+out:
+ for_each_power_domain(power_domain, power_domain_mask)
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
@@ -13630,7 +13662,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(PCH_DPLL(pll->id));
@@ -13638,6 +13670,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & DPLL_VCO_ENABLE;
}
@@ -15568,10 +15602,12 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
i915_redisable_vga_power_on(dev);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
static bool primary_get_hw_state(struct intel_plane *plane)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1bbd67b046da..1d8de43bed56 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2362,15 +2362,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(intel_dp->output_reg);
if (!(tmp & DP_PORT_EN))
- return false;
+ goto out;
if (IS_GEN7(dev) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
@@ -2381,7 +2384,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
*pipe = p;
- return true;
+ ret = true;
+
+ goto out;
}
}
@@ -2393,7 +2398,12 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
*pipe = PORT_TO_PIPE(tmp);
}
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_dp_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ea5415851c6e..df7f3cb66056 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1428,6 +1428,8 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
@@ -1514,6 +1516,7 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
enable_rpm_wakeref_asserts(dev_priv)
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 44742fa2f616..0193c62a53ef 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -664,13 +664,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
+ bool ret;
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
@@ -691,12 +694,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
- return true;
+ ret = true;
+
+ goto out;
}
}
}
+out:
+ intel_display_power_put(dev_priv, power_domain);
- return false;
+ return ret;
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4a77639a489d..cb5d1b15755c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -880,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
@@ -897,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_hdmi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0da0240caf81..bc04d8d29acb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -75,22 +75,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a234687792f0..b28c29f20e75 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2829,7 +2829,10 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
memset(ddb, 0, sizeof(*ddb));
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
for_each_plane(dev_priv, pipe, plane) {
@@ -2841,6 +2844,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
val = I915_READ(CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
val);
+
+ intel_display_power_put(dev_priv, power_domain);
}
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ddbdbffe829a..4f43d9b32e66 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -470,6 +470,43 @@ static void gen9_set_dc_state_debugmask_memory_up(
}
}
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ int rewrites = 0;
+ int rereads = 0;
+ u32 v;
+
+ I915_WRITE(DC_STATE_EN, state);
+
+ /* It has been observed that disabling the dc6 state sometimes
+ * doesn't stick and dmc keeps returning old value. Make sure
+ * the write really sticks enough times and also force rewrite until
+ * we are confident that state is exactly what we want.
+ */
+ do {
+ v = I915_READ(DC_STATE_EN);
+
+ if (v != state) {
+ I915_WRITE(DC_STATE_EN, state);
+ rewrites++;
+ rereads = 0;
+ } else if (rereads++ > 5) {
+ break;
+ }
+
+ } while (rewrites < 100);
+
+ if (v != state)
+ DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
+
+ /* Most of the times we need one retry, avoid spam */
+ if (rewrites > 1)
+ DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
+}
+
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
uint32_t val;
@@ -494,10 +531,18 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
val = I915_READ(DC_STATE_EN);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
+
+ /* Check if DMC is ignoring our DC state requests */
+ if ((val & mask) != dev_priv->csr.dc_state)
+ DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->csr.dc_state, val & mask);
+
val &= ~mask;
val |= state;
- I915_WRITE(DC_STATE_EN, val);
- POSTING_READ(DC_STATE_EN);
+
+ gen9_write_dc_state(dev_priv, val);
+
+ dev_priv->csr.dc_state = val & mask;
}
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -1442,6 +1487,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
chv_set_pipe_power_well(dev_priv, power_well, false);
}
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
+ }
+
+ power_domains->domain_use_count[domain]++;
+}
+
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
@@ -1457,24 +1518,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
intel_runtime_pm_get(dev_priv);
- power_domains = &dev_priv->power_domains;
+ mutex_lock(&power_domains->lock);
+
+ __intel_display_power_get_domain(dev_priv, domain);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool is_enabled;
+
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return false;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++)
- intel_power_well_enable(dev_priv, power_well);
+ if (__intel_display_power_is_enabled(dev_priv, domain)) {
+ __intel_display_power_get_domain(dev_priv, domain);
+ is_enabled = true;
+ } else {
+ is_enabled = false;
}
- power_domains->domain_use_count[domain]++;
-
mutex_unlock(&power_domains->lock);
+
+ if (!is_enabled)
+ intel_runtime_pm_put(dev_priv);
+
+ return is_enabled;
}
/**
@@ -2213,15 +2303,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
*/
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- skl_display_core_uninit(dev_priv);
-
/*
* Even if power well support was disabled we still want to disable
* power wells while we are system suspended.
*/
if (!i915.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ skl_display_core_uninit(dev_priv);
}
/**
@@ -2246,6 +2336,41 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
}
/**
+ * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference if the device is
+ * already in use and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ int ret = pm_runtime_get_if_in_use(device);
+
+ /*
+ * In cases runtime PM is disabled by the RPM core and we get
+ * an -EINVAL return value we are not supposed to call this
+ * function, since the power state is undefined. This applies
+ * atm to the late/early system suspend/resume handlers.
+ */
+ WARN_ON_ONCE(ret < 0);
+ if (ret <= 0)
+ return false;
+ }
+
+ atomic_inc(&dev_priv->pm.wakeref_count);
+ assert_rpm_wakelock_held(dev_priv);
+
+ return true;
+}
+
+/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 8a70cec59bcd..2dfe58af12e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -24,7 +24,7 @@
static int nouveau_platform_probe(struct platform_device *pdev)
{
const struct nvkm_device_tegra_func *func;
- struct nvkm_device *device;
+ struct nvkm_device *device = NULL;
struct drm_device *drm;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 7f8a42721eb2..e7e581d6a8ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
return -ENOMEM;
- *pdevice = &tdev->device;
+
tdev->func = func;
tdev->pdev = pdev;
tdev->irq = -1;
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(tdev->vdd))
- return PTR_ERR(tdev->vdd);
+ if (IS_ERR(tdev->vdd)) {
+ ret = PTR_ERR(tdev->vdd);
+ goto free;
+ }
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->rst))
- return PTR_ERR(tdev->rst);
+ if (IS_ERR(tdev->rst)) {
+ ret = PTR_ERR(tdev->rst);
+ goto free;
+ }
tdev->clk = devm_clk_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->clk))
- return PTR_ERR(tdev->clk);
+ if (IS_ERR(tdev->clk)) {
+ ret = PTR_ERR(tdev->clk);
+ goto free;
+ }
tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
- if (IS_ERR(tdev->clk_pwr))
- return PTR_ERR(tdev->clk_pwr);
+ if (IS_ERR(tdev->clk_pwr)) {
+ ret = PTR_ERR(tdev->clk_pwr);
+ goto free;
+ }
nvkm_device_tegra_probe_iommu(tdev);
ret = nvkm_device_tegra_power_up(tdev);
if (ret)
- return ret;
+ goto remove;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
cfg, dbg, detect, mmio, subdev_mask,
&tdev->device);
if (ret)
- return ret;
+ goto powerdown;
+
+ *pdevice = &tdev->device;
return 0;
+
+powerdown:
+ nvkm_device_tegra_power_down(tdev);
+remove:
+ nvkm_device_tegra_remove_iommu(tdev);
+free:
+ kfree(tdev);
+ return ret;
}
#else
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 74e2f7c6c07e..9688970eca47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
.outp = outp,
}, *dp = &_dp;
u32 datarate = 0;
+ u8 pwr;
int ret;
if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
/* disable link interrupt handling during link training */
nvkm_notify_put(&outp->irq);
+ /* ensure sink is not in a low-power state */
+ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
+ if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
+ pwr &= ~DPCD_SC00_SET_POWER;
+ pwr |= DPCD_SC00_SET_POWER_D0;
+ nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
+ }
+ }
+
/* enable down-spreading and execute pre-train script from vbios */
dp_link_train_init(dp, outp->dpcd[3] & 0x01);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 9596290329c7..6e10c5e0ef11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -71,5 +71,11 @@
#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
+/* DPCD Sink Control */
+#define DPCD_SC00 0x00600
+#define DPCD_SC00_SET_POWER 0x03
+#define DPCD_SC00_SET_POWER_D0 0x01
+#define DPCD_SC00_SET_POWER_D3 0x03
+
void nvkm_dp_train(struct work_struct *);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 902b59cebac5..4197ca1bb1e4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
- drm_helper_hpd_irq_event(dev);
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 298ea1c453c3..2b9ba03a7c1a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &radeon_crtc->base;
unsigned long flags;
int r;
- int vpos, hpos, stat, min_udelay;
+ int vpos, hpos, stat, min_udelay = 0;
+ unsigned repcnt = 4;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (radeon_crtc->enabled && repcnt--) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
/* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 248c5a9fb0b6..0f14d897baf9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1079,12 +1079,6 @@ force:
/* update display watermarks based on new power state */
radeon_bandwidth_update(rdev);
- /* update displays */
- radeon_dpm_display_configuration_changed(rdev);
-
- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
- rdev->pm.dpm.single_display = single_display;
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1101,6 +1095,13 @@ force:
radeon_dpm_post_set_power_state(rdev);
+ /* update displays */
+ radeon_dpm_display_configuration_changed(rdev);
+
+ rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+ rdev->pm.dpm.single_display = single_display;
+
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active) {
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index da462afcb225..dd2dbb9746ce 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -18,6 +18,7 @@
#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
#include "bus.h"
#include "dev.h"
@@ -394,6 +395,7 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
dev_set_name(&device->dev, "%s", driver->driver.name);
+ of_dma_configure(&device->dev, host1x->dev->of_node);
device->dev.release = host1x_device_release;
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 314bf3718cc7..ff348690df94 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/dma-mapping.h>
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
@@ -68,6 +69,7 @@ static const struct host1x_info host1x01_info = {
.nb_bases = 8,
.init = host1x01_init,
.sync_offset = 0x3000,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct host1x_info host1x02_info = {
@@ -77,6 +79,7 @@ static const struct host1x_info host1x02_info = {
.nb_bases = 12,
.init = host1x02_init,
.sync_offset = 0x3000,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct host1x_info host1x04_info = {
@@ -86,6 +89,7 @@ static const struct host1x_info host1x04_info = {
.nb_bases = 64,
.init = host1x04_init,
.sync_offset = 0x2100,
+ .dma_mask = DMA_BIT_MASK(34),
};
static const struct host1x_info host1x05_info = {
@@ -95,6 +99,7 @@ static const struct host1x_info host1x05_info = {
.nb_bases = 64,
.init = host1x05_init,
.sync_offset = 0x2100,
+ .dma_mask = DMA_BIT_MASK(34),
};
static struct of_device_id host1x_of_match[] = {
@@ -148,6 +153,8 @@ static int host1x_probe(struct platform_device *pdev)
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
+ dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
+
if (host->info->init) {
err = host->info->init(host);
if (err)
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 0b6e8e9629c5..dace124994bb 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -96,6 +96,7 @@ struct host1x_info {
int nb_mlocks; /* host1x: number of mlocks */
int (*init)(struct host1x *); /* initialize per SoC ops */
int sync_offset;
+ u64 dma_mask; /* mask of addressable memory */
};
struct host1x {
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 3711df1d4526..4a45408dd820 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -586,8 +586,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *),
- GFP_KERNEL);
+ dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
if (!dev->bsc_regmap)
return -ENOMEM;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 00da80e02154..94b80a51ab68 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -358,6 +358,7 @@ int ib_register_device(struct ib_device *device,
ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
printk(KERN_WARNING "Couldn't query the device attributes\n");
+ ib_cache_cleanup_one(device);
goto out;
}
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index f334090bb612..1e37f3515d98 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1071,7 +1071,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
}
}
- if (rec->hop_limit > 1 || use_roce) {
+ if (rec->hop_limit > 0 || use_roce) {
ah_attr->ah_flags = IB_AH_GRH;
ah_attr->grh.dgid = rec->dgid;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6ffc9c4e93af..6c6fbff19752 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1970,7 +1970,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
resp_size);
INIT_UDATA(&uhw, buf + sizeof(cmd),
(unsigned long)cmd.response + resp_size,
- in_len - sizeof(cmd), out_len - resp_size);
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - resp_size);
memset(&cmd_ex, 0, sizeof(cmd_ex));
cmd_ex.user_handle = cmd.user_handle;
@@ -3413,7 +3414,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof resp);
ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
if (ret)
@@ -3439,7 +3441,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof resp);
ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
if (ret)
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4659256cd95e..3b2ddd64a371 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in,
- struct ib_udata *udata, int buf_size, int *inlen)
+ struct ib_udata *udata, int buf_size, int *inlen,
+ int is_xrc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd = {};
@@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
int ncont;
u32 offset;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
- int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
- if (drv_data < 0)
- return -EINVAL;
-
- ucmdlen = (drv_data < sizeof(ucmd)) ?
- drv_data : sizeof(ucmd);
+ ucmdlen = min(udata->inlen, sizeof(ucmd));
if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
mlx5_ib_dbg(dev, "failed copy udata\n");
@@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
- if (drv_data > sizeof(ucmd) &&
+ if (udata->inlen > sizeof(ucmd) &&
!ib_is_udata_cleared(udata, sizeof(ucmd),
- drv_data - sizeof(ucmd)))
+ udata->inlen - sizeof(ucmd)))
return -EINVAL;
- err = get_srq_user_index(to_mucontext(pd->uobject->context),
- &ucmd, udata->inlen, &uidx);
- if (err)
- return err;
+ if (is_xrc) {
+ err = get_srq_user_index(to_mucontext(pd->uobject->context),
+ &ucmd, udata->inlen, &uidx);
+ if (err)
+ return err;
+ }
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
@@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
- if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+ if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+ is_xrc){
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@@ -170,7 +169,7 @@ err_umem:
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in, int buf_size,
- int *inlen)
+ int *inlen, int is_xrc)
{
int err;
int i;
@@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+ if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+ is_xrc){
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
/* 0xffffff means we ask to work with cqe version 0 */
@@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
+ is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
+
if (pd->uobject)
- err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
+ err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
+ is_xrc);
else
- err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
+ err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
+ is_xrc);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
- is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
in->ctx.state_log_sz = ilog2(srq->msrq.max);
flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
xrcdn = 0;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e5e223938eec..374c129219ef 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -114,6 +114,7 @@ struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
+static void detach_device(struct device *dev);
/*
* For dynamic growth the aperture size is split into ranges of 128MB of
@@ -384,6 +385,9 @@ static void iommu_uninit_device(struct device *dev)
if (!dev_data)
return;
+ if (dev_data->domain)
+ detach_device(dev);
+
iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
dev);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 013bdfff2d4d..bf4959f4225b 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+ u8 bank, u8 cntr, u8 fxn,
+ u64 *value, bool is_write);
+
static inline void update_last_devid(u16 devid)
{
if (devid > amd_iommu_last_bdf)
@@ -1016,6 +1020,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
}
/*
+ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
+ * Workaround:
+ * BIOS should enable ATS write permission check by setting
+ * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
+ */
+static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
+{
+ u32 value;
+
+ if ((boot_cpu_data.x86 != 0x15) ||
+ (boot_cpu_data.x86_model < 0x30) ||
+ (boot_cpu_data.x86_model > 0x3f))
+ return;
+
+ /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
+ value = iommu_read_l2(iommu, 0x47);
+
+ if (value & BIT(0))
+ return;
+
+ /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
+ iommu_write_l2(iommu, 0x47, value | BIT(0));
+
+ pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
+ dev_name(&iommu->dev->dev));
+}
+
+/*
* This function clues the initialization function for one IOMMU
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
amd_iommu_pc_present = true;
/* Check if the performance counters can be written to */
- if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
- (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
+ if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
+ (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
(val != val2)) {
pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
amd_iommu_pc_present = false;
@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
}
amd_iommu_erratum_746_workaround(iommu);
+ amd_iommu_ats_write_check_workaround(iommu);
iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
amd_iommu_groups, "ivhd%d",
@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+ u8 bank, u8 cntr, u8 fxn,
u64 *value, bool is_write)
{
- struct amd_iommu *iommu;
u32 offset;
u32 max_offset_lim;
- /* Make sure the IOMMU PC resource is available */
- if (!amd_iommu_pc_present)
- return -ENODEV;
-
- /* Locate the iommu associated with the device ID */
- iommu = amd_iommu_rlookup_table[devid];
-
/* Check for valid iommu and pc register indexing */
- if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
+ if (WARN_ON((fxn > 0x28) || (fxn & 7)))
return -ENODEV;
offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
+
+int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+ u64 *value, bool is_write)
+{
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+ /* Make sure the IOMMU PC resource is available */
+ if (!amd_iommu_pc_present || iommu == NULL)
+ return -ENODEV;
+
+ return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
+ value, is_write);
+}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index fb092f3f11cb..8ffd7568fc91 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
/* Only care about add/remove events for physical functions */
if (pdev->is_virtfn)
return NOTIFY_DONE;
- if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
+ if (action != BUS_NOTIFY_ADD_DEVICE &&
+ action != BUS_NOTIFY_REMOVED_DEVICE)
return NOTIFY_DONE;
info = dmar_alloc_pci_notify_info(pdev, action);
@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
down_write(&dmar_global_lock);
if (action == BUS_NOTIFY_ADD_DEVICE)
dmar_pci_bus_add_dev(info);
- else if (action == BUS_NOTIFY_DEL_DEVICE)
+ else if (action == BUS_NOTIFY_REMOVED_DEVICE)
dmar_pci_bus_del_dev(info);
up_write(&dmar_global_lock);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 986a53e3eb96..a2e1b7f14df2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
rmrru->devices_cnt);
if(ret < 0)
return ret;
- } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
dmar_remove_dev_scope(info, rmrr->segment,
rmrru->devices, rmrru->devices_cnt);
}
@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
break;
else if(ret < 0)
return ret;
- } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
if (dmar_remove_dev_scope(info, atsr->segment,
atsru->devices, atsru->devices_cnt))
break;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0a73632b28d5..43dfd15c1dd2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -78,6 +78,9 @@ struct its_node {
#define ITS_ITT_ALIGN SZ_256
+/* Convert page order to size in bytes */
+#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
+
struct event_lpi_map {
unsigned long *lpi_map;
u16 *col_map;
@@ -600,11 +603,6 @@ static void its_unmask_irq(struct irq_data *d)
lpi_set_config(d, true);
}
-static void its_eoi_irq(struct irq_data *d)
-{
- gic_write_eoir(d->hwirq);
-}
-
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
@@ -641,7 +639,7 @@ static struct irq_chip its_irq_chip = {
.name = "ITS",
.irq_mask = its_mask_irq,
.irq_unmask = its_unmask_irq,
- .irq_eoi = its_eoi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_set_affinity,
.irq_compose_msi_msg = its_irq_compose_msi_msg,
};
@@ -846,7 +844,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
u64 type = GITS_BASER_TYPE(val);
u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
int order = get_order(psz);
- int alloc_size;
int alloc_pages;
u64 tmp;
void *base;
@@ -878,9 +875,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
}
}
- alloc_size = (1 << order) * PAGE_SIZE;
retry_alloc_baser:
- alloc_pages = (alloc_size / psz);
+ alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
if (alloc_pages > GITS_BASER_PAGES_MAX) {
alloc_pages = GITS_BASER_PAGES_MAX;
order = get_order(GITS_BASER_PAGES_MAX * psz);
@@ -933,7 +929,7 @@ retry_baser:
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
- __flush_dcache_area(base, alloc_size);
+ __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
@@ -966,7 +962,7 @@ retry_baser:
}
pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
- (int)(alloc_size / entry_size),
+ (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5df40480228b..dd834927bc66 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
if (clone)
free_rq_clone(clone);
+ else if (!tio->md->queue->mq_ops)
+ free_rq_tio(tio);
}
/*
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 7e9cbf757e95..fb7ed730d932 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -497,7 +497,7 @@ static int adp1653_probe(struct i2c_client *client,
if (!client->dev.platform_data) {
dev_err(&client->dev,
"Neither DT not platform data provided\n");
- return EINVAL;
+ return -EINVAL;
}
flash->platform_data = client->dev.platform_data;
}
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index f8dd7505b529..e1719ffdfb3d 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
}
/* tx 5v detect */
- tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
+ tx_5v = irq_reg_0x70 & info->cable_det_mask;
if (tx_5v) {
v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
- io_write(sd, 0x71, tx_5v);
adv76xx_s_detect_tx_5v_ctrl(sd);
if (handled)
*handled = true;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 8c54fd21022e..a13625722848 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -1843,8 +1843,7 @@ static void au0828_analog_create_entities(struct au0828_dev *dev)
ent->function = MEDIA_ENT_F_CONN_RF;
break;
default: /* AU0828_VMUX_DEBUG */
- ent->function = MEDIA_ENT_F_CONN_TEST;
- break;
+ continue;
}
ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 4c1903f781fc..0c6c17a1c59e 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -415,7 +415,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
delta = mftb() - psl_tb;
if (delta < 0)
delta = -delta;
- } while (cputime_to_usecs(delta) > 16);
+ } while (tb_to_ns(delta) > 16000);
return 0;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b6639ea0bf18..f6e4d9718035 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2232,6 +2232,7 @@ err_irq:
dma_release_channel(host->tx_chan);
if (host->rx_chan)
dma_release_channel(host->rx_chan);
+ pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
if (host->dbclk)
@@ -2253,6 +2254,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
dma_release_channel(host->tx_chan);
dma_release_channel(host->rx_chan);
+ pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
device_init_wakeup(&pdev->dev, false);
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 2a1b6e037e1a..0134ba32a057 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
- vol->upd_buf = vmalloc(req->bytes);
+ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
if (!vol->upd_buf)
return -ENOMEM;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 575790e8a75a..74a7dfecee27 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -843,7 +843,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (clear_intf)
mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
- if (eflag)
+ if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR))
mcp251x_write_bits(spi, EFLG, eflag, 0x00);
/* Update can state */
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 5eee62badf45..cbc99d5649af 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
static void gs_destroy_candev(struct gs_can *dev)
{
unregister_candev(dev->netdev);
- free_candev(dev->netdev);
usb_kill_anchored_urbs(&dev->tx_submitted);
- kfree(dev);
+ free_candev(dev->netdev);
}
static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
for (i = 0; i < icount; i++) {
dev->canch[i] = gs_make_candev(i, intf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
+ /* save error code to return later */
+ rc = PTR_ERR(dev->canch[i]);
+
/* on failure destroy previously created candevs */
icount = i;
- for (i = 0; i < icount; i++) {
+ for (i = 0; i < icount; i++)
gs_destroy_candev(dev->canch[i]);
- dev->canch[i] = NULL;
- }
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
kfree(dev);
return rc;
}
@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
return;
}
- for (i = 0; i < GS_MAX_INTF; i++) {
- struct gs_can *can = dev->canch[i];
-
- if (!can)
- continue;
-
- gs_destroy_candev(can);
- }
+ for (i = 0; i < GS_MAX_INTF; i++)
+ if (dev->canch[i])
+ gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
+ kfree(dev);
}
static const struct usb_device_id gs_usb_table[] = {
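
The gs_usb hunks reorder teardown so outstanding URBs are killed before the candev memory is freed. A minimal userspace analogue of the same rule (stop the asynchronous worker before freeing what it touches), sketched with pthreads; all names here are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	bool stop;          /* set by the owner to ask the worker to quit */
	int data;           /* state the worker keeps touching */
};

static void *worker(void *arg)
{
	struct ctx *c = arg;

	while (!__atomic_load_n(&c->stop, __ATOMIC_ACQUIRE))
		c->data++;                      /* asynchronous activity */
	return NULL;
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));
	pthread_t t;

	pthread_create(&t, NULL, worker, c);

	/* correct order: stop and join the worker first, free second.
	 * Freeing first would be the same use-after-free the patch fixes. */
	__atomic_store_n(&c->stop, true, __ATOMIC_RELEASE);
	pthread_join(t, NULL);
	printf("worker counted to %d before teardown\n", c->data);
	free(c);
	return 0;
}
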
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 7b881edc0b0a..d81fceddbe0e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2455,7 +2455,7 @@ boomerang_interrupt(int irq, void *dev_id)
int i;
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
- le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+ le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
PCI_DMA_TODEVICE);
for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 17472851674f..f749e4d389eb 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev)
priv->mdio->id);
mdiobus_unregister(priv->mdio);
- kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
priv->mdio = NULL;
}
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index f71ab2647a3b..08a23e6b60e9 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (of_phy_is_fixed_link(pdev->dev.of_node)) {
+ ret = of_phy_register_fixed_link(pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "bad fixed-link spec\n");
+ goto err_free_bus;
+ }
+ priv->phy_node = of_node_get(pdev->dev.of_node);
+ }
+
+ if (!priv->phy_node)
+ priv->phy_node = of_parse_phandle(pdev->dev.of_node,
+ "phy-handle", 0);
+
if (!priv->phy_node) {
dev_err(&pdev->dev, "no PHY specified\n");
ret = -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 04523d41b873..f8b810313094 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -4901,9 +4901,9 @@ struct c2s_pri_trans_table_entry {
* cfc delete event data
*/
struct cfc_del_event_data {
- u32 cid;
- u32 reserved0;
- u32 reserved1;
+ __le32 cid;
+ __le32 reserved0;
+ __le32 reserved1;
};
@@ -5119,15 +5119,9 @@ struct vf_pf_channel_zone_trigger {
* zone that triggers the in-bound interrupt
*/
struct trigger_vf_zone {
-#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u8 reserved0;
- struct vf_pf_channel_zone_trigger vf_pf_channel;
-#elif defined(__LITTLE_ENDIAN)
struct vf_pf_channel_zone_trigger vf_pf_channel;
u8 reserved0;
u16 reserved1;
-#endif
u32 reserved2;
};
@@ -5212,9 +5206,9 @@ struct e2_integ_data {
* set mac event data
*/
struct eth_event_data {
- u32 echo;
- u32 reserved0;
- u32 reserved1;
+ __le32 echo;
+ __le32 reserved0;
+ __le32 reserved1;
};
@@ -5224,9 +5218,9 @@ struct eth_event_data {
struct vf_pf_event_data {
u8 vf_id;
u8 reserved0;
- u16 reserved1;
- u32 msg_addr_lo;
- u32 msg_addr_hi;
+ __le16 reserved1;
+ __le32 msg_addr_lo;
+ __le32 msg_addr_hi;
};
/*
@@ -5235,9 +5229,9 @@ struct vf_pf_event_data {
struct vf_flr_event_data {
u8 vf_id;
u8 reserved0;
- u16 reserved1;
- u32 reserved2;
- u32 reserved3;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
};
/*
@@ -5246,9 +5240,9 @@ struct vf_flr_event_data {
struct malicious_vf_event_data {
u8 vf_id;
u8 err_id;
- u16 reserved1;
- u32 reserved2;
- u32 reserved3;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
};
/*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5c95d0c3b076..b597c32275aa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5282,14 +5282,14 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
{
unsigned long ramrod_flags = 0;
int rc = 0;
- u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+ u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
+ u32 cid = echo & BNX2X_SWCID_MASK;
struct bnx2x_vlan_mac_obj *vlan_mac_obj;
/* Always push next commands out, don't wait here */
__set_bit(RAMROD_CONT, &ramrod_flags);
- switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
- >> BNX2X_SWCID_SHIFT) {
+ switch (echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -5310,8 +5310,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
bnx2x_handle_mcast_eqe(bp);
return;
default:
- BNX2X_ERR("Unsupported classification command: %d\n",
- elem->message.data.eth_event.echo);
+ BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
return;
}
@@ -5480,9 +5479,6 @@ static void bnx2x_eq_int(struct bnx2x *bp)
goto next_spqe;
}
- /* elem CID originates from FW; actually LE */
- cid = SW_CID((__force __le32)
- elem->message.data.cfc_del_event.cid);
opcode = elem->message.opcode;
/* handle eq element */
@@ -5505,6 +5501,10 @@ static void bnx2x_eq_int(struct bnx2x *bp)
* we may want to verify here that the bp state is
* HALTING
*/
+
+ /* elem CID originates from FW; actually LE */
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
+
DP(BNX2X_MSG_SP,
"got delete ramrod for MULTI[%d]\n", cid);
@@ -5598,10 +5598,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
BNX2X_STATE_OPENING_WAIT4_PORT):
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
- cid = elem->message.data.eth_event.echo &
- BNX2X_SWCID_MASK;
DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
- cid);
+ SW_CID(elem->message.data.eth_event.echo));
rss_raw->clear_pending(rss_raw);
break;
@@ -5686,7 +5684,7 @@ static void bnx2x_sp_task(struct work_struct *work)
if (status & BNX2X_DEF_SB_IDX) {
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
- if (FCOE_INIT(bp) &&
+ if (FCOE_INIT(bp) &&
(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
/* Prevent local bottom-halves from running as
* we are going to change the local NAPI list.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9d027348cd09..632daff117d3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1672,11 +1672,12 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
{
unsigned long ramrod_flags = 0;
int rc = 0;
+ u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
/* Always push next commands out, don't wait here */
set_bit(RAMROD_CONT, &ramrod_flags);
- switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ switch (echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
&ramrod_flags);
@@ -1686,8 +1687,7 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
&ramrod_flags);
break;
default:
- BNX2X_ERR("Unsupported classification command: %d\n",
- elem->message.data.eth_event.echo);
+ BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
return;
}
if (rc < 0)
@@ -1747,16 +1747,14 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
switch (opcode) {
case EVENT_RING_OPCODE_CFC_DEL:
- cid = SW_CID((__force __le32)
- elem->message.data.cfc_del_event.cid);
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
break;
case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
case EVENT_RING_OPCODE_MULTICAST_RULES:
case EVENT_RING_OPCODE_FILTERS_RULES:
case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
- cid = (elem->message.data.eth_event.echo &
- BNX2X_SWCID_MASK);
+ cid = SW_CID(elem->message.data.eth_event.echo);
DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
break;
case EVENT_RING_OPCODE_VF_FLR:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 1374e5394a79..bfae300cf25f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -2187,8 +2187,10 @@ void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
/* Update VFDB with current message and schedule its handling */
mutex_lock(&BP_VFDB(bp)->event_mutex);
- BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
- BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+ BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
+ le32_to_cpu(vfpf_event->msg_addr_hi);
+ BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
+ le32_to_cpu(vfpf_event->msg_addr_lo);
BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
mutex_unlock(&BP_VFDB(bp)->event_mutex);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ce6b075842ee..4dfc25f042e8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -248,7 +248,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
- end = PTR_ALIGN(pdata + length + 1, 8) - 1;
+ end = pdata + length;
+ end = PTR_ALIGN(end, 8) - 1;
*end = 0;
skb_copy_from_linear_data(skb, pdata, len);
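
The bnxt change splits the PTR_ALIGN() expression so the rounded-up pointer is computed from pdata + length directly. PTR_ALIGN() rounds a pointer up to a power-of-two boundary; a minimal userspace sketch of that arithmetic, with hypothetical buffer and length values:

#include <stdint.h>
#include <stdio.h>

/* round a pointer up to the next multiple of 'align' (a power of two),
 * mirroring what the kernel's PTR_ALIGN() does */
static void *ptr_align_up(void *p, uintptr_t align)
{
	uintptr_t v = (uintptr_t)p;

	return (void *)((v + align - 1) & ~(align - 1));
}

int main(void)
{
	char buf[64];
	char *pdata = buf + 3;              /* deliberately misaligned */
	size_t length = 10;                 /* hypothetical push length */

	/* last byte of the 8-byte-aligned region covering the data */
	char *end = (char *)ptr_align_up(pdata + length, 8) - 1;

	printf("pdata=%p end=%p span=%td bytes\n",
	       (void *)pdata, (void *)end, end - pdata + 1);
	return 0;
}
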
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 04b0d16b210e..95bc470ae441 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -987,7 +987,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
if (!list_empty(&rxf->ucast_pending_add_q)) {
mac = list_first_entry(&rxf->ucast_pending_add_q,
struct bna_mac, qe);
- list_add_tail(&mac->qe, &rxf->ucast_active_q);
+ list_move_tail(&mac->qe, &rxf->ucast_active_q);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
return 1;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 00cc9156abbb..092f097a5943 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -116,6 +116,15 @@
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9
+/* Minimum FIFO level before all packets for the CQ are dropped
+ *
+ * This value ensures that once a packet has been "accepted"
+ * for reception it will not get dropped due to non-availability
+ * of a CQ descriptor. A HW erratum mandates this value to be
+ * at least 0x100.
+ */
+#define NICPF_CQM_MIN_DROP_LEVEL 0x100
+
/* Global timer for CQ timer thresh interrupts
* Calculated for SCLK of 700Mhz
* value written should be a 1/16th of what is expected
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 4dded90076c8..95f17f8cadac 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -304,6 +304,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
static void nic_init_hw(struct nicpf *nic)
{
int i;
+ u64 cqm_cfg;
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -340,6 +341,11 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable VLAN ethertype matching and stripping */
nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
(2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
+
+ /* Check if HW expected value is higher (could be in future chips) */
+ cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
+ if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
+ nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
}
/* Channel parse index configuration */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index dd536be20193..afb10e326b4f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -21,7 +21,7 @@
#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
-#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CQM_CFG (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ee584c59ff62..fe3763df3f13 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -545,6 +545,7 @@ struct be_adapter {
struct delayed_work be_err_detection_work;
u8 recovery_retries;
u8 err_flags;
+ bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
u32 flags;
u32 cmd_privileges;
/* Ethtool knobs and info */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0b9f741f31af..d8540ae95e5a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -666,10 +666,13 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_MCAST_PROMISCUOUS)
-#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+#define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \
+ BE_IF_FLAGS_PASS_L3L4_ERRORS | \
+ BE_IF_FLAGS_UNTAGGED)
-#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+#define BE_IF_ALL_FILT_FLAGS (BE_IF_FILT_FLAGS_BASIC | \
+ BE_IF_FLAGS_MULTICAST | \
+ BE_IF_FLAGS_ALL_PROMISCUOUS)
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 17422b20a8ec..536686476369 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -125,6 +125,11 @@ static const char * const ue_status_hi_desc[] = {
"Unknown"
};
+#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
+ BE_IF_FLAGS_BROADCAST | \
+ BE_IF_FLAGS_MULTICAST | \
+ BE_IF_FLAGS_PASS_L3L4_ERRORS)
+
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -3558,7 +3563,7 @@ static int be_enable_if_filters(struct be_adapter *adapter)
{
int status;
- status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+ status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
if (status)
return status;
@@ -3875,8 +3880,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
int status;
/* If a FW profile exists, then cap_flags are updated */
- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ cap_flags = BE_VF_IF_EN_FLAGS;
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
@@ -3892,10 +3896,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
}
}
- en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST |
- BE_IF_FLAGS_PASS_L3L4_ERRORS);
+ /* PF should enable IF flags during proxy if_create call */
+ en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
status = be_cmd_if_create(adapter, cap_flags, en_flags,
&vf_cfg->if_handle, vf + 1);
if (status)
@@ -5040,6 +5042,8 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
pci_iounmap(adapter->pdev, adapter->csr);
if (adapter->db)
pci_iounmap(adapter->pdev, adapter->db);
+ if (adapter->pcicfg && adapter->pcicfg_mapped)
+ pci_iounmap(adapter->pdev, adapter->pcicfg);
}
static int db_bar(struct be_adapter *adapter)
@@ -5091,8 +5095,10 @@ static int be_map_pci_bars(struct be_adapter *adapter)
if (!addr)
goto pci_map_err;
adapter->pcicfg = addr;
+ adapter->pcicfg_mapped = true;
} else {
adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+ adapter->pcicfg_mapped = false;
}
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 62fa136554ac..41b010645100 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1265,7 +1265,6 @@ static int ethoc_remove(struct platform_device *pdev)
if (priv->mdio) {
mdiobus_unregister(priv->mdio);
- kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
if (priv->clk)
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 623aa1c8ebc6..79a210aaf0bb 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2791,6 +2791,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_free;
}
+ fman->dev = &of_dev->dev;
+
return fman;
fman_node_put:
@@ -2845,8 +2847,6 @@ static int fman_probe(struct platform_device *of_dev)
dev_set_drvdata(dev, fman);
- fman->dev = dev;
-
dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a01e5a32b631..08c041548300 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1111,8 +1111,10 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
priv->errata |= GFAR_ERRATA_12;
+ /* P2020/P2010 Rev 1; MPC8548 Rev 2 */
if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
- ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+ ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 74beb1867230..4ccc032633c4 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -25,6 +25,7 @@ config HIX5HD2_GMAC
config HIP04_ETH
tristate "HISILICON P04 Ethernet support"
+ depends on HAS_IOMEM # For MFD_SYSCON
select MARVELL_PHY
select MFD_SYSCON
select HNS_MDIO
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index a0070d0e740d..d4f92ed322d6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -675,8 +675,12 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
{
int ret;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
switch (loop) {
+ case MAC_INTERNALLOOP_PHY:
+ ret = 0;
+ break;
case MAC_INTERNALLOOP_SERDES:
ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
break;
@@ -686,6 +690,10 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
default:
ret = -EINVAL;
}
+
+ if (!ret)
+ hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
+
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 9439f04962e1..38fc5be3870c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -230,6 +230,30 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
}
}
+static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+ u16 max_q_per_vf, max_vfn;
+ u32 q_id, q_num_per_port;
+ u32 mac_id;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ return;
+
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
+ HNS_DSAF_COMM_SERVICE_NW_IDX,
+ &max_vfn, &max_q_per_vf);
+ q_num_per_port = max_vfn * max_q_per_vf;
+
+ for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
+ DSAFV2_SERDES_LBK_QID_M,
+ DSAFV2_SERDES_LBK_QID_S,
+ q_id);
+ q_id += q_num_per_port;
+ }
+}
+
/**
* hns_dsaf_sw_port_type_cfg - cfg sw type
* @dsaf_id: dsa fabric id
@@ -691,6 +715,16 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
}
+void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
+ return;
+
+ dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
+ DSAFV2_SERDES_LBK_EN_B, !!en);
+}
+
/**
* hns_dsaf_tbl_stat_en - tbl
* @dsaf_id: dsa fabric id
@@ -1022,6 +1056,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
/* set promisc def queue id */
hns_dsaf_mix_def_qid_cfg(dsaf_dev);
+ /* set inner loopback queue id */
+ hns_dsaf_inner_qid_cfg(dsaf_dev);
+
/* in non switch mode, set all port to access mode */
hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 40205b910f80..5fea226efaf3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -417,5 +417,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
int hns_dsaf_get_regs_count(void);
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index f0c4f9b09d5b..60d695daa471 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -134,6 +134,7 @@
#define DSAF_XGE_INT_STS_0_REG 0x1C0
#define DSAF_PPE_INT_STS_0_REG 0x1E0
#define DSAF_ROCEE_INT_STS_0_REG 0x200
+#define DSAFV2_SERDES_LBK_0_REG 0x220
#define DSAF_PPE_QID_CFG_0_REG 0x300
#define DSAF_SW_PORT_TYPE_0_REG 0x320
#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -857,6 +858,10 @@
#define PPEV2_CFG_RSS_TBL_4N3_S 24
#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S)
+#define DSAFV2_SERDES_LBK_EN_B 8
+#define DSAFV2_SERDES_LBK_QID_S 0
+#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S)
+
#define PPE_CNT_CLR_CE_B 0
#define PPE_CNT_CLR_SNAP_EN_B 1
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3df22840fcd1..3c4a3bc31a89 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -295,8 +295,10 @@ static int __lb_setup(struct net_device *ndev,
switch (loop) {
case MAC_INTERNALLOOP_PHY:
- if ((phy_dev) && (!phy_dev->is_c45))
+ if ((phy_dev) && (!phy_dev->is_c45)) {
ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+ ret |= h->dev->ops->set_loopback(h, loop, 0x1);
+ }
break;
case MAC_INTERNALLOOP_MAC:
if ((h->dev->ops->set_loopback) &&
@@ -376,6 +378,7 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb)
{
struct net_device *ndev;
+ struct hns_nic_priv *priv;
struct hnae_ring *ring;
struct netdev_queue *dev_queue;
struct sk_buff *new_skb;
@@ -385,8 +388,17 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
char buff[33]; /* 32B data and the last character '\0' */
if (!ring_data) { /* Just for doing create frame*/
+ ndev = skb->dev;
+ priv = netdev_priv(ndev);
+
frame_size = skb->len;
memset(skb->data, 0xFF, frame_size);
+ if ((!AE_IS_VER1(priv->enet_ver)) &&
+ (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) {
+ memcpy(skb->data, ndev->dev_addr, 6);
+ skb->data[5] += 0x1f;
+ }
+
frame_size &= ~1ul;
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
memset(&skb->data[frame_size / 2 + 10], 0xBE,
@@ -486,6 +498,7 @@ static int __lb_run_test(struct net_device *ndev,
/* place data into test skb */
(void)skb_put(skb, size);
+ skb->dev = ndev;
__lb_other_process(NULL, skb);
skb->queue_mapping = NIC_LB_TEST_RING_ID;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 335417b4756b..ebe60719e489 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1166,7 +1166,10 @@ map_failed:
if (!firmware_has_feature(FW_FEATURE_CMO))
netdev_err(netdev, "tx: unable to map xmit buffer\n");
adapter->tx_map_failed++;
- skb_linearize(skb);
+ if (skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
force_bounce = 1;
goto retry_bounce;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7d6570843723..6e9e16eee5d0 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1348,44 +1348,44 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.cmd = REQUEST_CAPABILITY;
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
- crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues);
+ crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
- crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues);
+ crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
- crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues);
+ crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
- cpu_to_be32(adapter->req_tx_entries_per_subcrq);
+ cpu_to_be64(adapter->req_tx_entries_per_subcrq);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
- cpu_to_be32(adapter->req_rx_add_entries_per_subcrq);
+ cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
- crq.request_capability.number = cpu_to_be32(adapter->req_mtu);
+ crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
if (adapter->promisc_supported) {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
- crq.request_capability.number = cpu_to_be32(1);
+ crq.request_capability.number = cpu_to_be64(1);
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
- crq.request_capability.number = cpu_to_be32(0);
+ crq.request_capability.number = cpu_to_be64(0);
ibmvnic_send_crq(adapter, &crq);
}
@@ -2312,93 +2312,93 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
switch (be16_to_cpu(crq->query_capability.capability)) {
case MIN_TX_QUEUES:
adapter->min_tx_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_tx_queues = %lld\n",
adapter->min_tx_queues);
break;
case MIN_RX_QUEUES:
adapter->min_rx_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_rx_queues = %lld\n",
adapter->min_rx_queues);
break;
case MIN_RX_ADD_QUEUES:
adapter->min_rx_add_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
adapter->min_rx_add_queues);
break;
case MAX_TX_QUEUES:
adapter->max_tx_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_tx_queues = %lld\n",
adapter->max_tx_queues);
break;
case MAX_RX_QUEUES:
adapter->max_rx_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_rx_queues = %lld\n",
adapter->max_rx_queues);
break;
case MAX_RX_ADD_QUEUES:
adapter->max_rx_add_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
adapter->max_rx_add_queues);
break;
case MIN_TX_ENTRIES_PER_SUBCRQ:
adapter->min_tx_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
adapter->min_tx_entries_per_subcrq);
break;
case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
adapter->min_rx_add_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
adapter->min_rx_add_entries_per_subcrq);
break;
case MAX_TX_ENTRIES_PER_SUBCRQ:
adapter->max_tx_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
adapter->max_tx_entries_per_subcrq);
break;
case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
adapter->max_rx_add_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
adapter->max_rx_add_entries_per_subcrq);
break;
case TCP_IP_OFFLOAD:
adapter->tcp_ip_offload =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
adapter->tcp_ip_offload);
break;
case PROMISC_SUPPORTED:
adapter->promisc_supported =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "promisc_supported = %lld\n",
adapter->promisc_supported);
break;
case MIN_MTU:
- adapter->min_mtu = be32_to_cpu(crq->query_capability.number);
+ adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
break;
case MAX_MTU:
- adapter->max_mtu = be32_to_cpu(crq->query_capability.number);
+ adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
break;
case MAX_MULTICAST_FILTERS:
adapter->max_multicast_filters =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_multicast_filters = %lld\n",
adapter->max_multicast_filters);
break;
case VLAN_HEADER_INSERTION:
adapter->vlan_header_insertion =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
if (adapter->vlan_header_insertion)
netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
@@ -2406,43 +2406,43 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
break;
case MAX_TX_SG_ENTRIES:
adapter->max_tx_sg_entries =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
adapter->max_tx_sg_entries);
break;
case RX_SG_SUPPORTED:
adapter->rx_sg_supported =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "rx_sg_supported = %lld\n",
adapter->rx_sg_supported);
break;
case OPT_TX_COMP_SUB_QUEUES:
adapter->opt_tx_comp_sub_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
adapter->opt_tx_comp_sub_queues);
break;
case OPT_RX_COMP_QUEUES:
adapter->opt_rx_comp_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
adapter->opt_rx_comp_queues);
break;
case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
adapter->opt_rx_bufadd_q_per_rx_comp_q =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
adapter->opt_rx_bufadd_q_per_rx_comp_q);
break;
case OPT_TX_ENTRIES_PER_SUBCRQ:
adapter->opt_tx_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
adapter->opt_tx_entries_per_subcrq);
break;
case OPT_RXBA_ENTRIES_PER_SUBCRQ:
adapter->opt_rxba_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
adapter->opt_rxba_entries_per_subcrq);
break;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1242925ad34c..1a9993cc79b5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -319,10 +319,8 @@ struct ibmvnic_capability {
u8 first;
u8 cmd;
__be16 capability; /* one of ibmvnic_capabilities */
+ __be64 number;
struct ibmvnic_rc rc;
- __be32 number; /*FIX: should be __be64, but I'm getting the least
- * significant word first
- */
} __packed __aligned(8);
struct ibmvnic_login {
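
The ibmvnic change widens the CRQ capability field to a genuine 64-bit big-endian quantity, so both directions now use 64-bit byte-order helpers. A minimal userspace sketch of the same conversion using the glibc <endian.h> helpers (htobe64/be64toh standing in for cpu_to_be64/be64_to_cpu); the struct layout below is hypothetical:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical wire format with a 64-bit big-endian value */
struct wire_cap {
	uint16_t capability;   /* big-endian on the wire */
	uint64_t number;       /* big-endian on the wire */
} __attribute__((packed));

int main(void)
{
	struct wire_cap msg;
	uint64_t host_val = 0x1122334455667788ULL;

	/* sender: convert host order to big-endian before transmit */
	msg.capability = htobe16(42);
	msg.number = htobe64(host_val);

	/* receiver: convert back before using the value */
	printf("capability=%u number=0x%llx\n",
	       (unsigned)be16toh(msg.capability),
	       (unsigned long long)be64toh(msg.number));
	return 0;
}
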
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b1de7afd4116..3ddf657bc10b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
}
static inline void
-jme_clear_pm(struct jme_adapter *jme)
+jme_clear_pm_enable_wol(struct jme_adapter *jme)
{
jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}
+static inline void
+jme_clear_pm_disable_wol(struct jme_adapter *jme)
+{
+ jwrite32(jme, JME_PMCS, PMCS_STMASK);
+}
+
static int
jme_reload_eeprom(struct jme_adapter *jme)
{
@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
struct jme_adapter *jme = netdev_priv(netdev);
int rc;
- jme_clear_pm(jme);
+ jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
static void
jme_powersave_phy(struct jme_adapter *jme)
{
- if (jme->reg_pmcs) {
+ if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
jme_set_100m_half(jme);
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
- jme_clear_pm(jme);
+ jme_clear_pm_enable_wol(jme);
} else {
jme_phy_off(jme);
}
@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
if (wol->wolopts & WAKE_MAGIC)
jme->reg_pmcs |= PMCS_MFEN;
- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
- device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
-
return 0;
}
@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
jme->mii_if.mdio_read = jme_mdio_read;
jme->mii_if.mdio_write = jme_mdio_write;
- jme_clear_pm(jme);
- device_set_wakeup_enable(&pdev->dev, true);
+ jme_clear_pm_disable_wol(jme);
+ device_init_wakeup(&pdev->dev, true);
jme_set_phyfifo_5level(jme);
jme->pcirev = pdev->revision;
@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
if (!netif_running(netdev))
return 0;
- jme_clear_pm(jme);
+ jme_clear_pm_disable_wol(jme);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
@@ -3312,13 +3315,14 @@ jme_resume(struct device *dev)
jme_reset_phy_processor(jme);
jme_phy_calibration(jme);
jme_phy_setEA(jme);
- jme_start_irq(jme);
netif_device_attach(netdev);
atomic_inc(&jme->link_changing);
jme_reset_link(jme);
+ jme_start_irq(jme);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 16b26d17c54c..b4b258c8ca47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2258,7 +2258,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
struct mlx4_en_dev *mdev = en_priv->mdev;
u64 mac_u64 = mlx4_mac_to_u64(mac);
- if (!is_valid_ether_addr(mac))
+ if (is_multicast_ether_addr(mac))
return -EINVAL;
return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b8a51515e73c..503ec23e84cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1272,6 +1272,7 @@ err_set_port:
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
int err = 0;
+ int nvfs;
struct mlx4_slaves_pport slaves_port1;
struct mlx4_slaves_pport slaves_port2;
DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
@@ -1288,11 +1289,18 @@ static int mlx4_mf_bond(struct mlx4_dev *dev)
return -EINVAL;
}
+ /* the number of virtual functions is the total number of functions
+ * minus one physical function for each port.
+ */
+ nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
+ bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
+
/* limit on maximum allowed VFs */
- if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
- bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) >
- MAX_MF_BOND_ALLOWED_SLAVES)
+ if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
+ mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
+ nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
return -EINVAL;
+ }
if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 787b7bb54d52..211c65087997 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -193,10 +193,10 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
if (need_mf_bond) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
@@ -389,10 +389,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
@@ -479,10 +479,10 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
@@ -588,10 +588,10 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
if (need_mf_bond) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
@@ -764,10 +764,10 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
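
The mlx4 hunks annotate the second mutex of each pair with mutex_lock_nested(SINGLE_DEPTH_NESTING) so lockdep accepts the intentional double acquisition of two same-class table locks. That annotation has no userspace equivalent; a plain-C way to keep the underlying pattern deadlock-free, shown here only as an analogue, is to take same-class locks in a fixed (address) order:

#include <pthread.h>
#include <stdio.h>

/* lock two same-class mutexes without risking an AB/BA deadlock by
 * always acquiring the lower-addressed one first */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a > b) {
		pthread_mutex_t *tmp = a; a = b; b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	pthread_mutex_unlock(b);
}

int main(void)
{
	pthread_mutex_t table = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t dup_table = PTHREAD_MUTEX_INITIALIZER;

	lock_pair(&table, &dup_table);   /* same order regardless of caller */
	printf("both tables locked\n");
	unlock_pair(&table, &dup_table);
	return 0;
}

Either approach closes the AB/BA window; the kernel patch keeps the port-dependent order and only teaches lockdep that the nesting is intentional.
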
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 9c0e80e64b43..dbc2fb89e067 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -237,6 +237,7 @@ struct mlx5e_pport_stats {
static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
+ "bytes",
"csum_none",
"csum_sw",
"lro_packets",
@@ -246,16 +247,18 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
struct mlx5e_rq_stats {
u64 packets;
+ u64 bytes;
u64 csum_none;
u64 csum_sw;
u64 lro_packets;
u64 lro_bytes;
u64 wqe_err;
-#define NUM_RQ_STATS 6
+#define NUM_RQ_STATS 7
};
static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
+ "bytes",
"tso_packets",
"tso_bytes",
"tso_inner_packets",
@@ -271,6 +274,7 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
struct mlx5e_sq_stats {
/* commonly accessed in data path */
u64 packets;
+ u64 bytes;
u64 tso_packets;
u64 tso_bytes;
u64 tso_inner_packets;
@@ -282,7 +286,7 @@ struct mlx5e_sq_stats {
u64 stopped;
u64 wake;
u64 dropped;
-#define NUM_SQ_STATS 11
+#define NUM_SQ_STATS 12
};
struct mlx5e_stats {
@@ -328,14 +332,9 @@ enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
};
-enum cq_flags {
- MLX5E_CQ_HAS_CQES = 1,
-};
-
struct mlx5e_cq {
/* data path - accessed per cqe */
struct mlx5_cqwq wq;
- unsigned long flags;
/* data path - accessed per napi poll */
struct napi_struct *napi;
@@ -476,6 +475,8 @@ enum mlx5e_traffic_types {
MLX5E_NUM_TT,
};
+#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
+
enum mlx5e_rqt_ix {
MLX5E_INDIRECTION_RQT,
MLX5E_SINGLE_RQ_RQT,
@@ -646,9 +647,12 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+ int num_channels);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
struct mlx5e_tx_wqe *wqe, int bf_sz)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index be6543570b2b..2018eebe1531 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -62,10 +62,11 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
overflow_work);
+ unsigned long flags;
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
timecounter_read(&tstamp->clock);
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
}
@@ -136,10 +137,11 @@ static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
return 0;
}
@@ -150,10 +152,11 @@ static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
u64 ns;
+ unsigned long flags;
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
ns = timecounter_read(&tstamp->clock);
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
*ts = ns_to_timespec64(ns);
@@ -164,10 +167,11 @@ static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
+ unsigned long flags;
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
timecounter_adjtime(&tstamp->clock, delta);
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
return 0;
}
@@ -176,6 +180,7 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
u64 adj;
u32 diff;
+ unsigned long flags;
int neg_adj = 0;
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
@@ -189,11 +194,11 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
timecounter_read(&tstamp->clock);
tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
tstamp->nominal_c_mult + diff;
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 0959656404b3..68834b715f6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -386,6 +386,8 @@ static int mlx5e_set_channels(struct net_device *dev,
mlx5e_close_locked(dev);
priv->params.num_channels = count;
+ mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
if (was_opened)
err = mlx5e_open_locked(dev);
@@ -716,18 +718,36 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
+static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+ int i;
+
+ MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
+ mlx5e_build_tir_ctx_hash(tirc, priv);
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ if (IS_HASHING_TT(i))
+ mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+}
+
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- bool close_open;
- int err = 0;
+ int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ void *in;
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
(hfunc != ETH_RSS_HASH_XOR) &&
(hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
mutex_lock(&priv->state_lock);
if (indir) {
@@ -736,11 +756,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
}
- close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
- test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (close_open)
- mlx5e_close_locked(dev);
-
if (key)
memcpy(priv->params.toeplitz_hash_key, key,
sizeof(priv->params.toeplitz_hash_key));
@@ -748,12 +763,13 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
if (hfunc != ETH_RSS_HASH_NO_CHANGE)
priv->params.rss_hfunc = hfunc;
- if (close_open)
- err = mlx5e_open_locked(priv->netdev);
+ mlx5e_modify_tirs_hash(priv, in, inlen);
mutex_unlock(&priv->state_lock);
- return err;
+ kvfree(in);
+
+ return 0;
}
static int mlx5e_get_rxnfc(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5063c0e0f8ac..19e5daeaa61d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -143,6 +143,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
return;
/* Collect firts the SW counters and then HW for consistency */
+ s->rx_packets = 0;
+ s->rx_bytes = 0;
+ s->tx_packets = 0;
+ s->tx_bytes = 0;
s->tso_packets = 0;
s->tso_bytes = 0;
s->tso_inner_packets = 0;
@@ -160,6 +164,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
s->lro_packets += rq_stats->lro_packets;
s->lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
@@ -169,6 +175,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
s->tso_packets += sq_stats->tso_packets;
s->tso_bytes += sq_stats->tso_bytes;
s->tso_inner_packets += sq_stats->tso_inner_packets;
@@ -233,23 +241,6 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->tx_broadcast_bytes =
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
- s->rx_packets =
- s->rx_unicast_packets +
- s->rx_multicast_packets +
- s->rx_broadcast_packets;
- s->rx_bytes =
- s->rx_unicast_bytes +
- s->rx_multicast_bytes +
- s->rx_broadcast_bytes;
- s->tx_packets =
- s->tx_unicast_packets +
- s->tx_multicast_packets +
- s->tx_broadcast_packets;
- s->tx_bytes =
- s->tx_unicast_bytes +
- s->tx_multicast_bytes +
- s->tx_broadcast_bytes;
-
/* Update calculated offload counters */
s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
s->rx_csum_good = s->rx_packets - s->rx_csum_none -
@@ -1211,7 +1202,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
ix = priv->params.indirection_rqt[ix];
- ix = ix % priv->params.num_channels;
MLX5_SET(rqtc, rqtc, rq_num[i],
test_bit(MLX5E_STATE_OPENED, &priv->state) ?
priv->channel[ix]->rq.rqn :
@@ -1329,7 +1319,22 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
lro_timer_supported_periods[2]));
}
-static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+{
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+ }
+}
+
+static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1337,6 +1342,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
void *tirc;
int inlen;
int err;
+ int tt;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -1348,7 +1354,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
mlx5e_build_tir_ctx_lro(tirc, priv);
- err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+ err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+ if (err)
+ break;
+ }
kvfree(in);
@@ -1703,17 +1713,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
default:
MLX5_SET(tirc, tirc, indirect_table,
priv->rqtn[MLX5E_INDIRECTION_RQT]);
- MLX5_SET(tirc, tirc, rx_hash_fn,
- mlx5e_rx_hash_fn(priv->params.rss_hfunc));
- if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
- void *rss_key = MLX5_ADDR_OF(tirc, tirc,
- rx_hash_toeplitz_key);
- size_t len = MLX5_FLD_SZ_BYTES(tirc,
- rx_hash_toeplitz_key);
-
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- memcpy(rss_key, priv->params.toeplitz_hash_key, len);
- }
+ mlx5e_build_tir_ctx_hash(tirc, priv);
break;
}
@@ -1950,8 +1950,10 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_close_locked(priv->netdev);
priv->params.lro_en = !!(features & NETIF_F_LRO);
- mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
- mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+ err = mlx5e_modify_tirs_lro(priv);
+ if (err)
+ mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
+ err);
if (was_opened)
err = mlx5e_open_locked(priv->netdev);
@@ -2253,12 +2255,20 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
}
#endif
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+ int num_channels)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ indirection_rqt[i] = i % num_channels;
+}
+
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int i;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2281,8 +2291,8 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
netdev_rss_key_fill(priv->params.toeplitz_hash_key,
sizeof(priv->params.toeplitz_hash_key));
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
- priv->params.indirection_rqt[i] = i % num_channels;
+ mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, num_channels);
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
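
mlx5e_build_default_indir_rqt() simply spreads indirection-table slots round-robin over the active channels, which is why mlx5e_set_channels() must rebuild it when the channel count changes. A minimal sketch with a hypothetical table size:

#include <stdio.h>

#define INDIR_RQT_SIZE 16   /* hypothetical; the driver uses MLX5E_INDIR_RQT_SIZE */

/* spread indirection-table slots round-robin over the channels */
static void build_default_indir_rqt(unsigned int *indirection_rqt, int len,
				    int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}

int main(void)
{
	unsigned int rqt[INDIR_RQT_SIZE];
	int i;

	build_default_indir_rqt(rqt, INDIR_RQT_SIZE, 3);
	for (i = 0; i < INDIR_RQT_SIZE; i++)
		printf("slot %2d -> channel %u\n", i, rqt[i]);
	return 0;
}
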
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 519a07f253f9..884ed19cded2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -231,10 +231,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done;
- /* avoid accessing cq (dma coherent memory) if not needed */
- if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
- return 0;
-
for (work_done = 0; work_done < budget; work_done++) {
struct mlx5e_rx_wqe *wqe;
struct mlx5_cqe64 *cqe;
@@ -268,6 +264,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
mlx5e_build_rx_skb(cqe, rq, skb);
rq->stats.packets++;
+ rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
napi_gro_receive(cq->napi, skb);
wq_ll_pop:
@@ -280,8 +277,5 @@ wq_ll_pop:
/* ensure cq space is freed before enabling more cqes */
wmb();
- if (work_done == budget)
- set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
-
return work_done;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index c34f4f3e9537..94a14f85f70d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -177,6 +177,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
unsigned int skb_len = skb->len;
u8 opcode = MLX5_OPCODE_SEND;
dma_addr_t dma_addr = 0;
+ unsigned int num_bytes;
bool bf = false;
u16 headlen;
u16 ds_cnt;
@@ -216,16 +217,17 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.tso_bytes += skb->len - ihs;
}
- wi->num_bytes = skb->len +
- (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
} else {
bf = sq->bf_budget &&
!skb->xmit_more &&
!skb_shinfo(skb)->nr_frags;
ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
- wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
+ wi->num_bytes = num_bytes;
+
if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
&skb_len);
@@ -317,6 +319,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
sq->stats.packets++;
+ sq->stats.bytes += num_bytes;
return NETDEV_TX_OK;
dma_unmap_wqe_err:
@@ -345,10 +348,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
u16 sqcc;
int i;
- /* avoid accessing cq (dma coherent memory) if not needed */
- if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
- return false;
-
sq = container_of(cq, struct mlx5e_sq, cq);
npkts = 0;
@@ -432,10 +431,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
netif_tx_wake_queue(sq->txq);
sq->stats.wake++;
}
- if (i == MLX5E_TX_CQ_POLL_BUDGET) {
- set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
- return true;
- }
- return false;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
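The en_tx.c change computes the wire byte count once into a local num_bytes, accounting for the replicated headers of each TSO segment or padding short frames to the minimum Ethernet length, and then reuses it for both wi->num_bytes and the new sq->stats.bytes counter. A small standalone sketch of that calculation; ETH_ZLEN is the usual 60-byte minimum frame length, and skb_len, gso_segs and the inline header size ihs are illustrative inputs:

#include <stdio.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length, without FCS */

/* Bytes that will actually hit the wire for one send WQE. */
static unsigned int tx_num_bytes(unsigned int skb_len, unsigned int gso_segs,
				 unsigned int ihs, int is_tso)
{
	if (is_tso)
		/* Each extra TSO segment repeats the ihs-byte headers. */
		return skb_len + (gso_segs - 1) * ihs;

	/* Short frames are padded up to the minimum Ethernet length. */
	return skb_len > ETH_ZLEN ? skb_len : ETH_ZLEN;
}

int main(void)
{
	printf("TSO, 3 segments, 54-byte headers: %u\n",
	       tx_num_bytes(3000, 3, 54, 1));
	printf("tiny non-TSO frame: %u\n", tx_num_bytes(42, 1, 14, 0));
	return 0;
}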
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 4ac8d716dbdd..66d51a77609e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -88,7 +88,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
- set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
barrier();
napi_schedule(cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index c071077aafbd..7992c553c1f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -215,7 +215,7 @@ mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
int index = q->producer_counter & (q->count - 1);
- if ((q->producer_counter - q->consumer_counter) == q->count)
+ if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
return NULL;
return mlxsw_pci_queue_elem_info_get(q, index);
}
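The mlxsw fix casts the producer/consumer difference to u16 before comparing it with q->count. Assuming the counters are free-running 16-bit values, as the cast suggests, the plain subtraction is promoted to int and goes negative once the producer wraps past the consumer, so the "ring full" check silently stops firing. A standalone sketch of that wraparound behaviour with illustrative counter values and ring size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Free-running 16-bit ring counters after the producer has
	 * wrapped around while the consumer has not yet.
	 */
	uint16_t producer = 5;
	uint16_t consumer = 65525;
	unsigned int count = 16;	/* ring size */

	/* Integer promotion makes the plain subtraction negative... */
	printf("without cast: %d (full check %s)\n",
	       producer - consumer,
	       (producer - consumer) == (int)count ? "fires" : "misses");

	/* ...while truncating back to 16 bits recovers the distance. */
	printf("with cast:    %u (full check %s)\n",
	       (uint16_t)(producer - consumer),
	       (uint16_t)(producer - consumer) == count ? "fires" : "misses");
	return 0;
}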
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 53487d3eb9f6..4afbc3e9e381 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2592,9 +2592,7 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
if (mlxsw_sp_port->bridged) {
mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
-
- if (lag->ref_count == 1)
- mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+ mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
}
if (lag->ref_count == 1) {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 00cfd95ca59d..3e67f451f2ab 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -474,9 +474,9 @@ static int moxart_mac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ndev->base_addr = res->start;
priv->base = devm_ioremap_resource(p_dev, res);
- ret = IS_ERR(priv->base);
- if (ret) {
+ if (IS_ERR(priv->base)) {
dev_err(p_dev, "devm_ioremap_resource failed\n");
+ ret = PTR_ERR(priv->base);
goto init_fail;
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 689a4a5c8dcf..1ef03939d25f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
dev->netdev_ops = &qcaspi_netdev_ops;
qcaspi_set_ethtool_ops(dev);
dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
- dev->flags = IFF_MULTICAST;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->tx_queue_len = 100;
qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 537974cfd427..dd2cf3738b73 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4933,8 +4933,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
- RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
- break;
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
@@ -4943,8 +4941,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
- break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
@@ -7730,10 +7726,13 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
struct rtl8169_counters *counters = tp->counters;
unsigned int start;
- if (netif_running(dev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ if (netif_running(dev) && pm_runtime_active(&pdev->dev))
rtl8169_rx_missed(dev, ioaddr);
do {
@@ -7761,7 +7760,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
* Fetch additional counter values missing in stats collected by driver
* from tally counters.
*/
- rtl8169_update_counters(dev);
+ if (pm_runtime_active(&pdev->dev))
+ rtl8169_update_counters(dev);
/*
* Subtract values fetched during initialization.
@@ -7774,6 +7774,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
le16_to_cpu(tp->tc_offset.tx_aborted);
+ pm_runtime_put_noidle(&pdev->dev);
+
return stats;
}
@@ -7853,6 +7855,10 @@ static int rtl8169_runtime_suspend(struct device *device)
rtl8169_net_suspend(dev);
+ /* Update counters before entering runtime suspend */
+ rtl8169_rx_missed(dev, tp->mmio_addr);
+ rtl8169_update_counters(dev);
+
return 0;
}
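The r8169 change brackets the stats read with pm_runtime_get_noresume()/pm_runtime_put_noidle() and only touches hardware registers while the device is runtime-active; the runtime-suspend path then snapshots the counters so nothing is lost while the chip is powered down. A hedged kernel-style sketch of that guard pattern, where my_get_stats() and my_read_hw_counters() are hypothetical stand-ins for the driver's stats path and its MMIO reads:

#include <linux/pm_runtime.h>

/* Hypothetical stand-in for the MMIO counter reads. */
static void my_read_hw_counters(struct device *dev)
{
	/* Register accesses would go here. */
}

static void my_get_stats(struct device *dev)
{
	/* Take a usage reference without waking the device... */
	pm_runtime_get_noresume(dev);

	/* ...and only touch registers if it is actually powered;
	 * otherwise rely on the values cached at runtime suspend.
	 */
	if (pm_runtime_active(dev))
		my_read_hw_counters(dev);

	pm_runtime_put_noidle(dev);
}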
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 88656ceb6e29..8f2c4fb4c724 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1708,7 +1708,6 @@ static int ravb_set_gti(struct net_device *ndev)
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
struct ravb_private *priv;
enum ravb_chip_id chip_id;
struct net_device *ndev;
@@ -1740,8 +1739,7 @@ static int ravb_probe(struct platform_device *pdev)
ndev->base_addr = res->start;
ndev->dma = -1;
- match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
- chip_id = (enum ravb_chip_id)match->data;
+ chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
if (chip_id == RCAR_GEN3)
irq = platform_get_irq_byname(pdev, "ch22");
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a2767336b7c5..9c6448915b65 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3044,15 +3044,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp->ether_link_active_low = pd->ether_link_active_low;
/* set cpu data */
- if (id) {
+ if (id)
mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
- } else {
- const struct of_device_id *match;
+ else
+ mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
- match = of_match_device(of_match_ptr(sh_eth_match_table),
- &pdev->dev);
- mdp->cd = (struct sh_eth_cpu_data *)match->data;
- }
mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
if (!mdp->reg_offset) {
dev_err(&pdev->dev, "Unknown register type (%d)\n",
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0faf16336035..efb54f356a67 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -199,21 +199,12 @@ int stmmac_mdio_register(struct net_device *ndev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
int addr, found;
- struct device_node *mdio_node = NULL;
- struct device_node *child_node = NULL;
+ struct device_node *mdio_node = priv->plat->mdio_node;
if (!mdio_bus_data)
return 0;
if (IS_ENABLED(CONFIG_OF)) {
- for_each_child_of_node(priv->device->of_node, child_node) {
- if (of_device_is_compatible(child_node,
- "snps,dwmac-mdio")) {
- mdio_node = child_node;
- break;
- }
- }
-
if (mdio_node) {
netdev_dbg(ndev, "FOUND MDIO subnode\n");
} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 9cf181f839fd..dcbd2a1601e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -146,6 +146,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ struct device_node *child_node = NULL;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -176,13 +177,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->phy_node = of_node_get(np);
}
+ for_each_child_of_node(np, child_node)
+ if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
+ plat->mdio_node = child_node;
+ break;
+ }
+
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
*/
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name)
+ if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
plat->mdio_bus_data = NULL;
else
plat->mdio_bus_data =
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index fc8bbff2d7e3..af11ed1e0bcc 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -426,7 +426,7 @@
#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
-static int debug = 3;
+static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
@@ -650,6 +650,11 @@ struct net_local {
u32 mmc_tx_counters_mask;
struct dwceqos_flowcontrol flowcontrol;
+
+ /* Tracks the intermediate state where the phy has been started but
+ * hardware init has not finished yet.
+ */
+ bool phy_defer;
};
static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
@@ -901,6 +906,9 @@ static void dwceqos_adjust_link(struct net_device *ndev)
struct phy_device *phydev = lp->phy_dev;
int status_change = 0;
+ if (lp->phy_defer)
+ return;
+
if (phydev->link) {
if ((lp->speed != phydev->speed) ||
(lp->duplex != phydev->duplex)) {
@@ -1113,7 +1121,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
/* Allocate DMA descriptors */
size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
- &lp->rx_descs_addr, 0);
+ &lp->rx_descs_addr, GFP_KERNEL);
if (!lp->rx_descs)
goto err_out;
lp->rx_descs_tail_addr = lp->rx_descs_addr +
@@ -1121,7 +1129,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
- &lp->tx_descs_addr, 0);
+ &lp->tx_descs_addr, GFP_KERNEL);
if (!lp->tx_descs)
goto err_out;
lp->tx_descs_tail_addr = lp->tx_descs_addr +
@@ -1635,6 +1643,12 @@ static void dwceqos_init_hw(struct net_local *lp)
regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+ lp->phy_defer = false;
+ mutex_lock(&lp->phy_dev->lock);
+ phy_read_status(lp->phy_dev);
+ dwceqos_adjust_link(lp->ndev);
+ mutex_unlock(&lp->phy_dev->lock);
}
static void dwceqos_tx_reclaim(unsigned long data)
@@ -1880,9 +1894,13 @@ static int dwceqos_open(struct net_device *ndev)
}
netdev_reset_queue(ndev);
+ /* The dwceqos reset state machine requires all phy clocks to complete,
+ * hence the unusual init order with phy_start first.
+ */
+ lp->phy_defer = true;
+ phy_start(lp->phy_dev);
dwceqos_init_hw(lp);
napi_enable(&lp->napi);
- phy_start(lp->phy_dev);
netif_start_queue(ndev);
tasklet_enable(&lp->tx_bdreclaim_tasklet);
@@ -1915,18 +1933,19 @@ static int dwceqos_stop(struct net_device *ndev)
{
struct net_local *lp = netdev_priv(ndev);
- phy_stop(lp->phy_dev);
-
tasklet_disable(&lp->tx_bdreclaim_tasklet);
- netif_stop_queue(ndev);
napi_disable(&lp->napi);
- dwceqos_drain_dma(lp);
+ /* Stop all tx before we drain the tx dma. */
+ netif_tx_lock_bh(lp->ndev);
+ netif_stop_queue(ndev);
+ netif_tx_unlock_bh(lp->ndev);
- netif_tx_lock(lp->ndev);
+ dwceqos_drain_dma(lp);
dwceqos_reset_hw(lp);
+ phy_stop(lp->phy_dev);
+
dwceqos_descriptor_free(lp);
- netif_tx_unlock(lp->ndev);
return 0;
}
@@ -2178,12 +2197,10 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
((trans.initial_descriptor + trans.nr_descriptors) %
DWCEQOS_TX_DCNT));
- dwceqos_tx_finalize(skb, lp, &trans);
-
- netdev_sent_queue(ndev, skb->len);
-
spin_lock_bh(&lp->tx_lock);
lp->tx_free -= trans.nr_descriptors;
+ dwceqos_tx_finalize(skb, lp, &trans);
+ netdev_sent_queue(ndev, skb->len);
spin_unlock_bh(&lp->tx_lock);
ndev->trans_start = jiffies;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 48219c83fb00..4516c8a4fd82 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -297,6 +297,17 @@ static int kszphy_config_init(struct phy_device *phydev)
if (priv->led_mode >= 0)
kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
+ if (phy_interrupt_is_valid(phydev)) {
+ int ctl = phy_read(phydev, MII_BMCR);
+
+ if (ctl < 0)
+ return ctl;
+
+ ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -636,6 +647,21 @@ static void kszphy_get_stats(struct phy_device *phydev,
data[i] = kszphy_get_stat(phydev, i);
}
+static int kszphy_resume(struct phy_device *phydev)
+{
+ int value;
+
+ mutex_lock(&phydev->lock);
+
+ value = phy_read(phydev, MII_BMCR);
+ phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
+
+ kszphy_config_intr(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -845,7 +871,7 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8061,
.name = "Micrel KSZ8061",
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 04f4eb34fa80..931836e09a6b 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -443,9 +443,14 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
* network traffic (demand mode).
*/
struct ppp *ppp = PF_TO_PPP(pf);
+
+ ppp_recv_lock(ppp);
if (ppp->n_channels == 0 &&
- (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+ (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
+ ppp_recv_unlock(ppp);
break;
+ }
+ ppp_recv_unlock(ppp);
}
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
@@ -532,9 +537,12 @@ static unsigned int ppp_poll(struct file *file, poll_table *wait)
else if (pf->kind == INTERFACE) {
/* see comment in ppp_read */
struct ppp *ppp = PF_TO_PPP(pf);
+
+ ppp_recv_lock(ppp);
if (ppp->n_channels == 0 &&
(ppp->flags & SC_LOOP_TRAFFIC) == 0)
mask |= POLLIN | POLLRDNORM;
+ ppp_recv_unlock(ppp);
}
return mask;
@@ -2810,6 +2818,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
out2:
mutex_unlock(&pn->all_ppp_mutex);
+ rtnl_unlock();
free_netdev(dev);
out1:
*retp = ret;
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 224e7d82de6d..cf77f2dffa69 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -134,7 +134,6 @@ static void ax88172a_remove_mdio(struct usbnet *dev)
netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id);
mdiobus_unregister(priv->mdio);
- kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dc0212c3cc28..86ba30ba35e8 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -837,7 +837,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
- /* reset data interface */
+ /* Reset data interface. Some devices will not reset properly
+ * unless they are configured first. Toggle the altsetting to
+ * force a reset
+ */
+ usb_set_interface(dev->udev, iface_no, data_altsetting);
temp = usb_set_interface(dev->udev, iface_no, 0);
if (temp) {
dev_dbg(&intf->dev, "set interface failed\n");
@@ -984,8 +988,6 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret;
-
/* MBIM backwards compatible function? */
if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
return -ENODEV;
@@ -994,16 +996,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
* Additionally, generic NCM devices are assumed to accept arbitrarily
* placed NDP.
*/
- ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
-
- /*
- * We should get an event when network connection is "connected" or
- * "disconnected". Set network connection in "disconnected" state
- * (carrier is OFF) during attach, so the IP network stack does not
- * start IPv6 negotiation and more.
- */
- usbnet_link_change(dev, 0, 0);
- return ret;
+ return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
}
static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
@@ -1586,7 +1579,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
- .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_LINK_INTR,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
@@ -1599,7 +1593,7 @@ static const struct driver_info cdc_ncm_info = {
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_WWAN,
+ | FLAG_LINK_INTR | FLAG_WWAN,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
@@ -1612,7 +1606,7 @@ static const struct driver_info wwan_info = {
static const struct driver_info wwan_noarp_info = {
.description = "Mobile Broadband Network Device (NO ARP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_WWAN | FLAG_NOARP,
+ | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 570deef53f74..a3a4ccf7cf52 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -861,8 +861,10 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
- {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -885,6 +887,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 0b0ba7ef14e4..10798128c03f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1769,6 +1769,13 @@ out3:
if (info->unbind)
info->unbind (dev, udev);
out1:
+ /* subdrivers must undo all they did in bind() if they
+ * fail it, but we may fail later and a deferred kevent
+ * may trigger an error resubmitting itself and, worse,
+ * schedule a timer. So we kill it all just in case.
+ */
+ cancel_work_sync(&dev->kevent);
+ del_timer_sync(&dev->delay);
free_netdev(net);
out:
return status;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0cbf520cea77..fc895d0e85d9 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -814,7 +814,7 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
/*
- * parse and copy relevant protocol headers:
+ * parse relevant protocol headers:
* For a tso pkt, relevant headers are L2/3/4 including options
* For a pkt requesting csum offloading, they are L2/3 and may include L4
* if it's a TCP/UDP pkt
@@ -827,15 +827,14 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
* Other effects:
* 1. related *ctx fields are updated.
* 2. ctx->copy_size is # of bytes copied
- * 3. the portion copied is guaranteed to be in the linear part
+ * 3. the portion to be copied is guaranteed to be in the linear part
*
*/
static int
-vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
- struct vmxnet3_tx_ctx *ctx,
- struct vmxnet3_adapter *adapter)
+vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_adapter *adapter)
{
- struct Vmxnet3_TxDataDesc *tdd;
u8 protocol = 0;
if (ctx->mss) { /* TSO */
@@ -892,16 +891,34 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
return 0;
}
+ return 1;
+err:
+ return -1;
+}
+
+/*
+ * copy relevant protocol headers to the transmit ring:
+ * For a tso pkt, relevant headers are L2/3/4 including options
+ * For a pkt requesting csum offloading, they are L2/3 and may include L4
+ * if it's a TCP/UDP pkt
+ *
+ *
+ * Note that this requires that vmxnet3_parse_hdr be called first to set the
+ * appropriate bits in ctx
+ */
+static void
+vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_TxDataDesc *tdd;
+
tdd = tq->data_ring.base + tq->tx_ring.next2fill;
memcpy(tdd->data, skb->data, ctx->copy_size);
netdev_dbg(adapter->netdev,
"copy %u bytes to dataRing[%u]\n",
ctx->copy_size, tq->tx_ring.next2fill);
- return 1;
-
-err:
- return -1;
}
@@ -998,22 +1015,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
}
- spin_lock_irqsave(&tq->tx_lock, flags);
-
- if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
- tq->stats.tx_ring_full++;
- netdev_dbg(adapter->netdev,
- "tx queue stopped on %s, next2comp %u"
- " next2fill %u\n", adapter->netdev->name,
- tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
- vmxnet3_tq_stop(tq, adapter);
- spin_unlock_irqrestore(&tq->tx_lock, flags);
- return NETDEV_TX_BUSY;
- }
-
-
- ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
+ ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
if (ret >= 0) {
BUG_ON(ret <= 0 && ctx.copy_size != 0);
/* hdrs parsed, check against other limits */
@@ -1033,9 +1035,26 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
} else {
tq->stats.drop_hdr_inspect_err++;
- goto unlock_drop_pkt;
+ goto drop_pkt;
}
+ spin_lock_irqsave(&tq->tx_lock, flags);
+
+ if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+ tq->stats.tx_ring_full++;
+ netdev_dbg(adapter->netdev,
+ "tx queue stopped on %s, next2comp %u"
+ " next2fill %u\n", adapter->netdev->name,
+ tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+ vmxnet3_tq_stop(tq, adapter);
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+
+ vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
+
/* fill tx descs related to addr & len */
if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
goto unlock_drop_pkt;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9ce088bb28ab..9a9fabb900c1 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -103,20 +103,23 @@ static struct dst_ops vrf_dst_ops = {
#if IS_ENABLED(CONFIG_IPV6)
static bool check_ipv6_frame(const struct sk_buff *skb)
{
- const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
- size_t hlen = sizeof(*ipv6h);
+ const struct ipv6hdr *ipv6h;
+ struct ipv6hdr _ipv6h;
bool rc = true;
- if (skb->len < hlen)
+ ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
+ if (!ipv6h)
goto out;
if (ipv6h->nexthdr == NEXTHDR_ICMP) {
const struct icmp6hdr *icmph;
+ struct icmp6hdr _icmph;
- if (skb->len < hlen + sizeof(*icmph))
+ icmph = skb_header_pointer(skb, sizeof(_ipv6h),
+ sizeof(_icmph), &_icmph);
+ if (!icmph)
goto out;
- icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
switch (icmph->icmp6_type) {
case NDISC_ROUTER_SOLICITATION:
case NDISC_ROUTER_ADVERTISEMENT:
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8ca243d93b78..fc998a3bd234 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -948,8 +948,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, rd);
- if (err < 0)
+ if (err < 0) {
+ cb->args[1] = err;
goto out;
+ }
skip:
++idx;
}
@@ -1176,9 +1178,10 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
md->gbp = ntohs(gbp->policy_id);
tun_dst = (struct metadata_dst *)skb_dst(skb);
- if (tun_dst)
+ if (tun_dst) {
tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
-
+ tun_dst->u.tun_info.options_len = sizeof(*md);
+ }
if (gbp->dont_learn)
md->gbp |= VXLAN_GBP_DONT_LEARN;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 070e2af05ca2..62ae43d2ab7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -126,7 +126,7 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
-static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
int i;
@@ -146,6 +146,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
get_order(mvm->fw_paging_db[i].fw_paging_size));
}
kfree(mvm->trans->paging_download_buf);
+ mvm->trans->paging_download_buf = NULL;
+
memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ebe37bb0ce4c..cc279e0961fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1244,6 +1244,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+/* Paging */
+void iwl_free_fw_paging(struct iwl_mvm *mvm);
+
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 09a94a5efb61..fd8f4a80ddfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -699,6 +699,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ iwl_free_fw_paging(mvm);
+
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 4fbaadda4e99..dae2c40d605c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -425,6 +425,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return -1;
}
+ /*
+ * Increase the pending frames counter, so that later when a reply comes
+ * in and the counter is decreased, we don't start getting negative
+ * values.
+ * Note that we don't need to make sure it isn't agg'd, since we're
+ * TXing non-sta
+ */
+ atomic_inc(&mvm->pending_frames[sta_id]);
+
return 0;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f701bc..5d28e9405f32 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -382,18 +382,18 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
[ND_CMD_ARS_CAP] = {
.in_num = 2,
.in_sizes = { 8, 8, },
- .out_num = 2,
- .out_sizes = { 4, 4, },
+ .out_num = 4,
+ .out_sizes = { 4, 4, 4, 4, },
},
[ND_CMD_ARS_START] = {
- .in_num = 4,
- .in_sizes = { 8, 8, 2, 6, },
- .out_num = 1,
- .out_sizes = { 4, },
+ .in_num = 5,
+ .in_sizes = { 8, 8, 2, 1, 5, },
+ .out_num = 2,
+ .out_sizes = { 4, 4, },
},
[ND_CMD_ARS_STATUS] = {
- .out_num = 2,
- .out_sizes = { 4, UINT_MAX, },
+ .out_num = 3,
+ .out_sizes = { 4, 4, UINT_MAX, },
},
};
@@ -442,8 +442,8 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
return in_field[1];
else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
return out_field[1];
- else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 1)
- return ND_CMD_ARS_STATUS_MAX;
+ else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
+ return out_field[1] - 8;
return UINT_MAX;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7edf31671dab..8d0b54670184 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -41,7 +41,7 @@ struct pmem_device {
phys_addr_t phys_addr;
/* when non-zero this device is hosting a 'pfn' instance */
phys_addr_t data_offset;
- unsigned long pfn_flags;
+ u64 pfn_flags;
void __pmem *virt_addr;
size_t size;
struct badblocks bb;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3cd921e6121e..03c46412fff4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -55,8 +55,9 @@ static void nvme_free_ns(struct kref *kref)
ns->disk->private_data = NULL;
spin_unlock(&dev_list_lock);
- nvme_put_ctrl(ns->ctrl);
put_disk(ns->disk);
+ ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
+ nvme_put_ctrl(ns->ctrl);
kfree(ns);
}
@@ -183,7 +184,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out_unmap;
}
- if (meta_buffer) {
+ if (meta_buffer && meta_len) {
struct bio_integrity_payload *bip;
meta = kmalloc(meta_len, GFP_KERNEL);
@@ -373,6 +374,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
+ if (io.flags)
+ return -EINVAL;
switch (io.opcode) {
case nvme_cmd_write:
@@ -424,6 +427,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
+ if (cmd.flags)
+ return -EINVAL;
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
@@ -556,6 +561,10 @@ static int nvme_revalidate_disk(struct gendisk *disk)
u16 old_ms;
unsigned short bs;
+ if (test_bit(NVME_NS_DEAD, &ns->flags)) {
+ set_capacity(disk, 0);
+ return -ENODEV;
+ }
if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
__func__, ns->ctrl->instance, ns->ns_id);
@@ -831,6 +840,23 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
return ret;
}
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+ struct request_queue *q)
+{
+ if (ctrl->max_hw_sectors) {
+ u32 max_segments =
+ (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
+
+ blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+ blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+ }
+ if (ctrl->stripe_size)
+ blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_virt_boundary(q, ctrl->page_size - 1);
+}
+
/*
* Initialize the cached copies of the Identify data and various controller
* register in our nvme_ctrl structure. This should be called as soon as
@@ -888,6 +914,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
}
+ nvme_set_queue_limits(ctrl, ctrl->admin_q);
+
kfree(id);
return 0;
}
@@ -1118,9 +1146,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (!ns)
return;
+ ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
+ if (ns->instance < 0)
+ goto out_free_ns;
+
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
- goto out_free_ns;
+ goto out_release_instance;
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
@@ -1134,17 +1166,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->disk = disk;
ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
+
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
- if (ctrl->max_hw_sectors) {
- blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
- blk_queue_max_segments(ns->queue,
- (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
- }
- if (ctrl->stripe_size)
- blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
- blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
+ nvme_set_queue_limits(ctrl, ns->queue);
disk->major = nvme_major;
disk->first_minor = 0;
@@ -1153,7 +1177,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
disk->queue = ns->queue;
disk->driverfs_dev = ctrl->device;
disk->flags = GENHD_FL_EXT_DEVT;
- sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid);
+ sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
@@ -1173,40 +1197,29 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
kfree(disk);
out_free_queue:
blk_cleanup_queue(ns->queue);
+ out_release_instance:
+ ida_simple_remove(&ctrl->ns_ida, ns->instance);
out_free_ns:
kfree(ns);
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
- bool kill = nvme_io_incapable(ns->ctrl) &&
- !blk_queue_dying(ns->queue);
-
- lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
- if (kill) {
- blk_set_queue_dying(ns->queue);
+ if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
+ return;
- /*
- * The controller was shutdown first if we got here through
- * device removal. The shutdown may requeue outstanding
- * requests. These need to be aborted immediately so
- * del_gendisk doesn't block indefinitely for their completion.
- */
- blk_mq_abort_requeue_list(ns->queue);
- }
if (ns->disk->flags & GENHD_FL_UP) {
if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group);
del_gendisk(ns->disk);
- }
- if (kill || !blk_queue_dying(ns->queue)) {
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
}
+ mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
+ mutex_unlock(&ns->ctrl->namespaces_mutex);
nvme_put_ns(ns);
}
@@ -1300,10 +1313,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
- mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
- mutex_unlock(&ctrl->namespaces_mutex);
}
static DEFINE_IDA(nvme_instance_ida);
@@ -1350,6 +1361,7 @@ static void nvme_free_ctrl(struct kref *kref)
put_device(ctrl->device);
nvme_release_instance(ctrl);
+ ida_destroy(&ctrl->ns_ida);
ctrl->ops->free_ctrl(ctrl);
}
@@ -1390,6 +1402,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
}
get_device(ctrl->device);
dev_set_drvdata(ctrl->device, ctrl);
+ ida_init(&ctrl->ns_ida);
spin_lock(&dev_list_lock);
list_add_tail(&ctrl->node, &nvme_ctrl_list);
@@ -1402,6 +1415,38 @@ out:
return ret;
}
+/**
+ * nvme_kill_queues(): Ends all namespace queues
+ * @ctrl: the dead controller whose namespace queues need to be ended
+ *
+ * Call this function when the driver determines it is unable to get the
+ * controller in a state capable of servicing IO.
+ */
+void nvme_kill_queues(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (!kref_get_unless_zero(&ns->kref))
+ continue;
+
+ /*
+ * Revalidating a dead namespace sets capacity to 0. This will
+ * stop buffered writers from dirtying pages that can't be synced.
+ */
+ if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ revalidate_disk(ns->disk);
+
+ blk_set_queue_dying(ns->queue);
+ blk_mq_abort_requeue_list(ns->queue);
+ blk_mq_start_stopped_hw_queues(ns->queue, true);
+
+ nvme_put_ns(ns);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+}
+
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9664d07d807d..fb15ba5f5d19 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@ struct nvme_ctrl {
struct mutex namespaces_mutex;
struct device *device; /* char device */
struct list_head node;
+ struct ida ns_ida;
char name[12];
char serial[20];
@@ -102,6 +103,7 @@ struct nvme_ns {
struct request_queue *queue;
struct gendisk *disk;
struct kref kref;
+ int instance;
u8 eui[8];
u8 uuid[16];
@@ -112,6 +114,11 @@ struct nvme_ns {
bool ext;
u8 pi_type;
int type;
+ unsigned long flags;
+
+#define NVME_NS_REMOVING 0
+#define NVME_NS_DEAD 1
+
u64 mode_select_num_blocks;
u32 mode_select_block_len;
};
@@ -240,6 +247,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
+void nvme_kill_queues(struct nvme_ctrl *ctrl);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a128672472ec..680f5780750c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -86,7 +86,6 @@ struct nvme_queue;
static int nvme_reset(struct nvme_dev *dev);
static void nvme_process_cq(struct nvme_queue *nvmeq);
-static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
/*
@@ -120,6 +119,7 @@ struct nvme_dev {
unsigned long flags;
#define NVME_CTRL_RESETTING 0
+#define NVME_CTRL_REMOVING 1
struct nvme_ctrl ctrl;
struct completion ioq_wait;
@@ -286,6 +286,17 @@ static int nvme_init_request(void *data, struct request *req,
return 0;
}
+static void nvme_queue_scan(struct nvme_dev *dev)
+{
+ /*
+ * Do not queue new scan work when a controller is reset during
+ * removal.
+ */
+ if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+ return;
+ queue_work(nvme_workq, &dev->scan_work);
+}
+
static void nvme_complete_async_event(struct nvme_dev *dev,
struct nvme_completion *cqe)
{
@@ -300,7 +311,7 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
switch (result & 0xff07) {
case NVME_AER_NOTICE_NS_CHANGED:
dev_info(dev->dev, "rescanning\n");
- queue_work(nvme_workq, &dev->scan_work);
+ nvme_queue_scan(dev);
default:
dev_warn(dev->dev, "async event result %08x\n", result);
}
@@ -679,7 +690,10 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_lock_irq(&nvmeq->q_lock);
if (unlikely(nvmeq->cq_vector < 0)) {
- ret = BLK_MQ_RQ_QUEUE_BUSY;
+ if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
+ ret = BLK_MQ_RQ_QUEUE_BUSY;
+ else
+ ret = BLK_MQ_RQ_QUEUE_ERROR;
spin_unlock_irq(&nvmeq->q_lock);
goto out;
}
@@ -1250,6 +1264,12 @@ static struct blk_mq_ops nvme_mq_ops = {
static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
+ /*
+ * If the controller was reset during removal, it's possible
+ * user requests may be waiting on a stopped queue. Start the
+ * queue to flush these to completion.
+ */
+ blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
blk_cleanup_queue(dev->ctrl.admin_q);
blk_mq_free_tag_set(&dev->admin_tagset);
}
@@ -1690,14 +1710,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
return 0;
dev->ctrl.tagset = &dev->tagset;
}
- queue_work(nvme_workq, &dev->scan_work);
+ nvme_queue_scan(dev);
return 0;
}
-static int nvme_dev_map(struct nvme_dev *dev)
+static int nvme_pci_enable(struct nvme_dev *dev)
{
u64 cap;
- int bars, result = -ENOMEM;
+ int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pci_enable_device_mem(pdev))
@@ -1705,24 +1725,14 @@ static int nvme_dev_map(struct nvme_dev *dev)
dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (!bars)
- goto disable_pci;
-
- if (pci_request_selected_regions(pdev, bars, "nvme"))
- goto disable_pci;
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
goto disable;
- dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
- if (!dev->bar)
- goto disable;
-
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
result = -ENODEV;
- goto unmap;
+ goto disable;
}
/*
@@ -1732,7 +1742,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (!pdev->irq) {
result = pci_enable_msix(pdev, dev->entry, 1);
if (result < 0)
- goto unmap;
+ goto disable;
}
cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1759,18 +1769,20 @@ static int nvme_dev_map(struct nvme_dev *dev)
pci_save_state(pdev);
return 0;
- unmap:
- iounmap(dev->bar);
- dev->bar = NULL;
disable:
- pci_release_regions(pdev);
- disable_pci:
pci_disable_device(pdev);
return result;
}
static void nvme_dev_unmap(struct nvme_dev *dev)
{
+ if (dev->bar)
+ iounmap(dev->bar);
+ pci_release_regions(to_pci_dev(dev->dev));
+}
+
+static void nvme_pci_disable(struct nvme_dev *dev)
+{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pdev->msi_enabled)
@@ -1778,12 +1790,6 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
else if (pdev->msix_enabled)
pci_disable_msix(pdev);
- if (dev->bar) {
- iounmap(dev->bar);
- dev->bar = NULL;
- pci_release_regions(pdev);
- }
-
if (pci_is_enabled(pdev)) {
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
@@ -1842,7 +1848,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_dev_list_remove(dev);
mutex_lock(&dev->shutdown_lock);
- if (dev->bar) {
+ if (pci_is_enabled(to_pci_dev(dev->dev))) {
nvme_stop_queues(&dev->ctrl);
csts = readl(dev->bar + NVME_REG_CSTS);
}
@@ -1855,7 +1861,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
- nvme_dev_unmap(dev);
+ nvme_pci_disable(dev);
for (i = dev->queue_count - 1; i >= 0; i--)
nvme_clear_queue(dev->queues[i]);
@@ -1899,10 +1905,20 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
kfree(dev);
}
+static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
+{
+ dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
+
+ kref_get(&dev->ctrl.kref);
+ nvme_dev_disable(dev, false);
+ if (!schedule_work(&dev->remove_work))
+ nvme_put_ctrl(&dev->ctrl);
+}
+
static void nvme_reset_work(struct work_struct *work)
{
struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
- int result;
+ int result = -ENODEV;
if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
goto out;
@@ -1911,37 +1927,37 @@ static void nvme_reset_work(struct work_struct *work)
* If we're called to reset a live controller first shut it down before
* moving on.
*/
- if (dev->bar)
+ if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
set_bit(NVME_CTRL_RESETTING, &dev->flags);
- result = nvme_dev_map(dev);
+ result = nvme_pci_enable(dev);
if (result)
goto out;
result = nvme_configure_admin_queue(dev);
if (result)
- goto unmap;
+ goto out;
nvme_init_queue(dev->queues[0], 0);
result = nvme_alloc_admin_tags(dev);
if (result)
- goto disable;
+ goto out;
result = nvme_init_identify(&dev->ctrl);
if (result)
- goto free_tags;
+ goto out;
result = nvme_setup_io_queues(dev);
if (result)
- goto free_tags;
+ goto out;
dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
result = nvme_dev_list_add(dev);
if (result)
- goto remove;
+ goto out;
/*
* Keep the controller around but remove all namespaces if we don't have
@@ -1958,19 +1974,8 @@ static void nvme_reset_work(struct work_struct *work)
clear_bit(NVME_CTRL_RESETTING, &dev->flags);
return;
- remove:
- nvme_dev_list_remove(dev);
- free_tags:
- nvme_dev_remove_admin(dev);
- blk_put_queue(dev->ctrl.admin_q);
- dev->ctrl.admin_q = NULL;
- dev->queues[0]->tags = NULL;
- disable:
- nvme_disable_admin_queue(dev, false);
- unmap:
- nvme_dev_unmap(dev);
out:
- nvme_remove_dead_ctrl(dev);
+ nvme_remove_dead_ctrl(dev, result);
}
static void nvme_remove_dead_ctrl_work(struct work_struct *work)
@@ -1978,19 +1983,12 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ nvme_kill_queues(&dev->ctrl);
if (pci_get_drvdata(pdev))
pci_stop_and_remove_bus_device_locked(pdev);
nvme_put_ctrl(&dev->ctrl);
}
-static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
-{
- dev_warn(dev->dev, "Removing after probe failure\n");
- kref_get(&dev->ctrl.kref);
- if (!schedule_work(&dev->remove_work))
- nvme_put_ctrl(&dev->ctrl);
-}
-
static int nvme_reset(struct nvme_dev *dev)
{
if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
@@ -2042,6 +2040,27 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.free_ctrl = nvme_pci_free_ctrl,
};
+static int nvme_dev_map(struct nvme_dev *dev)
+{
+ int bars;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (!bars)
+ return -ENODEV;
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ return -ENODEV;
+
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar)
+ goto release;
+
+ return 0;
+ release:
+ pci_release_regions(pdev);
+ return -ENODEV;
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int node, result = -ENOMEM;
@@ -2066,6 +2085,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
+ result = nvme_dev_map(dev);
+ if (result)
+ goto free;
+
INIT_LIST_HEAD(&dev->node);
INIT_WORK(&dev->scan_work, nvme_dev_scan);
INIT_WORK(&dev->reset_work, nvme_reset_work);
@@ -2089,6 +2112,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvme_release_prp_pools(dev);
put_pci:
put_device(dev->dev);
+ nvme_dev_unmap(dev);
free:
kfree(dev->queues);
kfree(dev->entry);
@@ -2112,10 +2136,16 @@ static void nvme_shutdown(struct pci_dev *pdev)
nvme_dev_disable(dev, true);
}
+/*
+ * The driver's remove may be called on a device in a partially initialized
+ * state. This function must not have any dependencies on the device state in
+ * order to proceed.
+ */
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+ set_bit(NVME_CTRL_REMOVING, &dev->flags);
pci_set_drvdata(pdev, NULL);
flush_work(&dev->scan_work);
nvme_remove_namespaces(&dev->ctrl);
@@ -2126,6 +2156,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_free_queues(dev, 0);
nvme_release_cmb(dev);
nvme_release_prp_pools(dev);
+ nvme_dev_unmap(dev);
nvme_put_ctrl(&dev->ctrl);
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 669739b302b2..5e7838290998 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -303,6 +303,7 @@ EXPORT_SYMBOL(of_phy_find_device);
* @dev: pointer to net_device claiming the phy
* @phy_np: Pointer to device tree node for the PHY
* @hndlr: Link state callback for the network device
+ * @flags: flags to pass to the PHY
* @iface: PHY data interface type
*
* If successful, returns a pointer to the phy_device with the embedded
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 75a605426538..d1cdd9c992ac 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -14,6 +14,7 @@ config PCI_DRA7XX
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
depends on ARCH_MVEBU || ARCH_DOVE
+ depends on ARM
depends on OF
config PCIE_DW
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index ed34c9520a02..6153853ca9c3 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -58,11 +58,6 @@
#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
u32 *bit_pos)
{
@@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
struct pcie_port *pp;
msi = irq_data_get_msi_desc(d);
- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
@@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
u32 offset;
msi = irq_data_get_msi_desc(d);
- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
@@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
u32 offset;
msi = irq_data_get_msi_desc(d);
- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 3923bed93c7e..f39961bcf7aa 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
}
+/* Drop MSG TLP except for Vendor MSG */
+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+{
+ u32 val;
+
+ val = ioread32(pcie->dbi + PCIE_STRFMR1);
+ val &= 0xDFFFFFFF;
+ iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+}
+
static int ls1021_pcie_link_up(struct pcie_port *pp)
{
u32 state;
@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
static void ls1021_pcie_host_init(struct pcie_port *pp)
{
struct ls_pcie *pcie = to_ls_pcie(pp);
- u32 val, index[2];
+ u32 index[2];
pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
"fsl,pcie-scfg");
@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
- /*
- * LS1021A Workaround for internal TKT228622
- * to fix the INTx hang issue
- */
- val = ioread32(pcie->dbi + PCIE_STRFMR1);
- val &= 0xffff;
- iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+ ls_pcie_drop_msg_tlp(pcie);
}
static int ls_pcie_link_up(struct pcie_port *pp)
@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
ls_pcie_fix_class(pcie);
ls_pcie_clear_multifunction(pcie);
+ ls_pcie_drop_msg_tlp(pcie);
iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
}
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c777b97207d5..5f70fee59a94 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -53,7 +53,7 @@ struct pcifront_device {
};
struct pcifront_sd {
- int domain;
+ struct pci_sysdata sd;
struct pcifront_device *pdev;
};
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
unsigned int domain, unsigned int bus,
struct pcifront_device *pdev)
{
- sd->domain = domain;
+ /* Because we do not expose that information via XenBus. */
+ sd->sd.node = first_online_node;
+ sd->sd.domain = domain;
sd->pdev = pdev;
}
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
domain, bus);
- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
if (!bus_entry || !sd) {
err = -ENOMEM;
goto err_out;
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/bq27xxx_battery_i2c.c
index 9429e66be096..8eafc6f0df88 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/bq27xxx_battery_i2c.c
@@ -21,6 +21,9 @@
#include <linux/power/bq27xxx_battery.h>
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
+
static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
{
struct bq27xxx_device_info *di = data;
@@ -70,19 +73,33 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
{
struct bq27xxx_device_info *di;
int ret;
+ char *name;
+ int num;
+
+ /* Get new ID for the new battery device */
+ mutex_lock(&battery_mutex);
+ num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
+ mutex_unlock(&battery_mutex);
+ if (num < 0)
+ return num;
+
+ name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
+ if (!name)
+ goto err_mem;
di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
if (!di)
- return -ENOMEM;
+ goto err_mem;
+ di->id = num;
di->dev = &client->dev;
di->chip = id->driver_data;
- di->name = id->name;
+ di->name = name;
di->bus.read = bq27xxx_battery_i2c_read;
ret = bq27xxx_battery_setup(di);
if (ret)
- return ret;
+ goto err_failed;
/* Schedule a polling after about 1 min */
schedule_delayed_work(&di->work, 60 * HZ);
@@ -103,6 +120,16 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
}
return 0;
+
+err_mem:
+ ret = -ENOMEM;
+
+err_failed:
+ mutex_lock(&battery_mutex);
+ idr_remove(&battery_id, num);
+ mutex_unlock(&battery_mutex);
+
+ return ret;
}
static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
@@ -111,6 +138,10 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
bq27xxx_battery_teardown(di);
+ mutex_lock(&battery_mutex);
+ idr_remove(&battery_id, di->id);
+ mutex_unlock(&battery_mutex);
+
return 0;
}
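The bq27xxx change gives each probed battery a unique name by allocating a small integer ID from an IDR under a mutex, building "<chip>-<id>" with devm_kasprintf(), and releasing the ID again on every error path and in remove(). A hedged kernel-style sketch of that allocate/name/release pairing; the my_* names are illustrative, not driver symbols:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/slab.h>

static DEFINE_IDR(my_instance_ids);
static DEFINE_MUTEX(my_instance_lock);

/* Returns the allocated id (>= 0) and fills *name, or a negative errno. */
static int my_alloc_name(struct device *dev, const char *base, char **name)
{
	int id;

	mutex_lock(&my_instance_lock);
	id = idr_alloc(&my_instance_ids, dev, 0, 0, GFP_KERNEL);
	mutex_unlock(&my_instance_lock);
	if (id < 0)
		return id;

	*name = devm_kasprintf(dev, GFP_KERNEL, "%s-%d", base, id);
	if (!*name) {
		/* Undo the allocation on every failure path. */
		mutex_lock(&my_instance_lock);
		idr_remove(&my_instance_ids, id);
		mutex_unlock(&my_instance_lock);
		return -ENOMEM;
	}
	return id;
}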
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3b3e0998fa6e..d6a691e27d33 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4002,6 +4002,7 @@ static ssize_t ipr_store_update_fw(struct device *dev,
struct ipr_sglist *sglist;
char fname[100];
char *src;
+ char *endline;
int result, dnld_size;
if (!capable(CAP_SYS_ADMIN))
@@ -4009,6 +4010,10 @@ static ssize_t ipr_store_update_fw(struct device *dev,
snprintf(fname, sizeof(fname), "%s", buf);
+ endline = strchr(fname, '\n');
+ if (endline)
+ *endline = '\0';
+
if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
return -EIO;
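The ipr change trims the trailing newline that sysfs writes typically carry before handing the string to request_firmware(); without it the lookup is attempted for a name like "file.bin\n" and fails. A minimal standalone sketch of the trim, with an illustrative buffer in place of the sysfs input:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* What arrives from 'echo file.bin > .../update_fw': note the '\n'. */
	const char *buf = "ipr-firmware.bin\n";
	char fname[100];
	char *endline;

	snprintf(fname, sizeof(fname), "%s", buf);

	/* Cut the name at the first newline, if any. */
	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	printf("firmware name: '%s'\n", fname);
	return 0;
}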
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fa6b2c4eb7a2..8c6e31874171 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1344,6 +1344,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
switch (ret) {
case BLKPREP_KILL:
+ case BLKPREP_INVALID:
req->errors = DID_NO_CONNECT << 16;
/* release the command and kill it */
if (req->special) {
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 91a003011acf..a9bac3bf20de 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
static int __init sh_pm_runtime_init(void)
{
- if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
+ if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
if (!of_find_compatible_node(NULL, NULL,
"renesas,cpg-mstp-clocks"))
return 0;
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 0c675861623f..d8e4219c2324 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -83,6 +83,7 @@ config SSB_SDIOHOST
config SSB_HOST_SOC
bool "Support for SSB bus on SoC"
depends on SSB && BCM47XX_NVRAM
+ select SSB_SPROM
help
Host interface for a SSB directly mapped into memory. This is
for some Broadcom SoCs from the BCM47xx and BCM53xx lines.
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 3ec7e65a3ffa..db49af90217e 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -147,7 +147,7 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
mutex_lock(&mdev->graph_mutex);
ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
if (ret) {
- mutex_unlock(&video->lock);
+ mutex_unlock(&mdev->graph_mutex);
return -ENOMEM;
}
media_entity_graph_walk_start(&graph, entity);
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index b59195edf636..b635ab67490d 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -85,8 +85,8 @@ static int ci_hdrc_pci_probe(struct pci_dev *pdev,
/* register a nop PHY */
ci->phy = usb_phy_generic_register();
- if (!ci->phy)
- return -ENOMEM;
+ if (IS_ERR(ci->phy))
+ return PTR_ERR(ci->phy);
memset(res, 0, sizeof(res));
res[0].start = pci_resource_start(pdev, 0);
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index a4f7db2e18dd..df47110bad2d 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -100,6 +100,9 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
if (sscanf(buf, "%u", &mode) != 1)
return -EINVAL;
+ if (mode > 255)
+ return -EBADRQC;
+
pm_runtime_get_sync(ci->dev);
spin_lock_irqsave(&ci->lock, flags);
ret = hw_port_test_set(ci, mode);
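The chipidea debug hunk above bounds-checks the requested port test mode, rejecting values above 255 before they reach hw_port_test_set(). A standalone sketch of the same parse-and-range check, using a hypothetical input string; illustration only, not part of the patch:

#include <stdio.h>

int main(void)
{
	const char *buf = "300";	/* hypothetical debugfs write */
	unsigned int mode;

	if (sscanf(buf, "%u", &mode) != 1)
		return 1;		/* the driver returns -EINVAL here */
	if (mode > 255)
		return 1;		/* the driver returns -EBADRQC here */

	printf("mode %u accepted\n", mode);
	return 0;
}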
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 45f86da1d6d3..03b6743461d1 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work)
int ci_hdrc_otg_init(struct ci_hdrc *ci)
{
INIT_WORK(&ci->work, ci_otg_work);
- ci->wq = create_singlethread_workqueue("ci_otg");
+ ci->wq = create_freezable_workqueue("ci_otg");
if (!ci->wq) {
dev_err(ci->dev, "can't create workqueue\n");
return -ENODEV;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 350dcd9af5d8..51b436918f78 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5401,6 +5401,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
}
bos = udev->bos;
+ udev->bos = NULL;
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
@@ -5493,11 +5494,8 @@ done:
usb_set_usb2_hardware_lpm(udev, 1);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
- /* release the new BOS descriptor allocated by hub_port_init() */
- if (udev->bos != bos) {
- usb_release_bos_descriptor(udev);
- udev->bos = bos;
- }
+ usb_release_bos_descriptor(udev);
+ udev->bos = bos;
return 0;
re_enumerate:
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index fd95ba6ec317..f0decc0d69b5 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -1,5 +1,6 @@
config USB_DWC2
tristate "DesignWare USB2 DRD Core Support"
+ depends on HAS_DMA
depends on USB || USB_GADGET
help
Say Y here if your system has a Dual Role Hi-Speed USB
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e991d55914db..46c4ba75dc2a 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -619,6 +619,12 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
__func__, hsotg->dr_mode);
break;
}
+
+ /*
+	 * NOTE: This is required for some Rockchip SoC-based
+	 * platforms.
+ */
+ msleep(50);
}
/*
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 36606fc33c0d..a41274aa52ad 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -1174,14 +1174,11 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
halt_status, n_bytes,
xfer_done);
- if (*xfer_done && urb->status != -EINPROGRESS)
- failed = 1;
-
- if (failed) {
+ if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
dwc2_host_complete(hsotg, qtd, urb->status);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
- dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
- failed, *xfer_done, urb->status);
+ dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
+ failed, *xfer_done);
return failed;
}
@@ -1236,21 +1233,23 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
int i;
+ int qtd_desc_count;
qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
xfer_done = 0;
+ qtd_desc_count = qtd->n_desc;
- for (i = 0; i < qtd->n_desc; i++) {
+ for (i = 0; i < qtd_desc_count; i++) {
if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
desc_num, halt_status,
- &xfer_done)) {
- qtd = NULL;
- break;
- }
+ &xfer_done))
+ goto stop_scan;
+
desc_num++;
}
}
+stop_scan:
if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
/*
* Resetting the data toggle for bulk and interrupt endpoints
@@ -1258,7 +1257,7 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
*/
if (halt_status == DWC2_HC_XFER_STALL)
qh->data_toggle = DWC2_HC_PID_DATA0;
- else if (qtd)
+ else
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
}
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index f8253803a050..cadba8b13c48 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -525,11 +525,19 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
+ if (WARN(!chan || !chan->qh,
+ "chan->qh must be specified for non-control eps\n"))
+ return;
+
if (pid == TSIZ_SC_MC_PID_DATA0)
chan->qh->data_toggle = DWC2_HC_PID_DATA0;
else
chan->qh->data_toggle = DWC2_HC_PID_DATA1;
} else {
+ if (WARN(!qtd,
+ "qtd must be specified for control eps\n"))
+ return;
+
if (pid == TSIZ_SC_MC_PID_DATA0)
qtd->data_toggle = DWC2_HC_PID_DATA0;
else
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 29130682e547..e4f8b90d9627 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -856,7 +856,6 @@ struct dwc3 {
unsigned pullups_connected:1;
unsigned resize_fifos:1;
unsigned setup_packet_pending:1;
- unsigned start_config_issued:1;
unsigned three_stage_setup:1;
unsigned usb3_lpm_capable:1;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3a9354abcb68..8d6b75c2f53b 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
int ret;
u32 reg;
- dwc->start_config_issued = false;
cfg = le16_to_cpu(ctrl->wValue);
switch (state) {
@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
- case USB_REQ_SET_INTERFACE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
- dwc->start_config_issued = false;
- /* Fall through */
default:
dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7d1dd82a95ac..2363bad45af8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -385,24 +385,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
dep->trb_pool_dma = 0;
}
+static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
+
+/**
+ * dwc3_gadget_start_config - Configure EP resources
+ * @dwc: pointer to our controller context structure
+ * @dep: endpoint that is being enabled
+ *
+ * The assignment of transfer resources cannot perfectly follow the
+ * data book due to the fact that the controller driver does not have
+ * all knowledge of the configuration in advance. It is given this
+ * information piecemeal by the composite gadget framework after every
+ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
+ * programming model in this scenario can cause errors. For two
+ * reasons:
+ *
+ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
+ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
+ * multiple interfaces.
+ *
+ * 2) The databook does not mention doing more DEPXFERCFG for new
+ * endpoint on alt setting (8.1.6).
+ *
+ * The following simplified method is used instead:
+ *
+ * All hardware endpoints can be assigned a transfer resource and this
+ * setting will stay persistent until either a core reset or
+ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
+ * do DEPXFERCFG for every hardware endpoint as well. We are
+ * guaranteed that there are as many transfer resources as endpoints.
+ *
+ * This function is called for each endpoint when it is being enabled
+ * but is triggered only when called for EP0-out, which always happens
+ * first, and which should only happen in one of the above conditions.
+ */
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
+ int i;
+ int ret;
+
+ if (dep->number)
+ return 0;
memset(&params, 0x00, sizeof(params));
+ cmd = DWC3_DEPCMD_DEPSTARTCFG;
- if (dep->number != 1) {
- cmd = DWC3_DEPCMD_DEPSTARTCFG;
- /* XferRscIdx == 0 for ep0 and 2 for the remaining */
- if (dep->number > 1) {
- if (dwc->start_config_issued)
- return 0;
- dwc->start_config_issued = true;
- cmd |= DWC3_DEPCMD_PARAM(2);
- }
+ ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ if (ret)
+ return ret;
- return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
+
+ if (!dep)
+ continue;
+
+ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+ if (ret)
+ return ret;
}
return 0;
@@ -516,10 +558,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
- ret = dwc3_gadget_set_xfer_resource(dwc, dep);
- if (ret)
- return ret;
-
dep->endpoint.desc = desc;
dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
@@ -1636,8 +1674,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
- dwc->start_config_issued = false;
-
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -2237,7 +2273,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc3_disconnect_gadget(dwc);
- dwc->start_config_issued = false;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
@@ -2288,7 +2323,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc3_stop_active_transfers(dwc);
dwc3_clear_stall_all_ep(dwc);
- dwc->start_config_issued = false;
/* Reset device address to zero */
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 7e179f81d05c..87fb0fd6aaab 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -130,7 +130,8 @@ struct dev_data {
setup_can_stall : 1,
setup_out_ready : 1,
setup_out_error : 1,
- setup_abort : 1;
+ setup_abort : 1,
+ gadget_registered : 1;
unsigned setup_wLength;
/* the rest is basically write-once */
@@ -1179,7 +1180,8 @@ dev_release (struct inode *inode, struct file *fd)
/* closing ep0 === shutdown all */
- usb_gadget_unregister_driver (&gadgetfs_driver);
+ if (dev->gadget_registered)
+ usb_gadget_unregister_driver (&gadgetfs_driver);
/* at this point "good" hardware has disconnected the
* device from USB; the host won't see it any more.
@@ -1847,6 +1849,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
* kick in after the ep0 descriptor is closed.
*/
value = len;
+ dev->gadget_registered = true;
}
return value;
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 53c0692f1b09..93d28cb00b76 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2340,7 +2340,7 @@ static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
{
struct qe_udc *udc;
struct device_node *np = ofdev->dev.of_node;
- unsigned int tmp_addr = 0;
+ unsigned long tmp_addr = 0;
struct usb_device_para __iomem *usbpram;
unsigned int i;
u64 size;
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 4dff60d34f73..0d32052bf16f 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -369,9 +369,20 @@ static inline void set_max_speed(struct net2280_ep *ep, u32 max)
static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80,
0x50, 0x20, 0x70, 0x40, 0x90 };
- if (ep->dev->enhanced_mode)
+ if (ep->dev->enhanced_mode) {
reg = ep_enhanced[ep->num];
- else{
+ switch (ep->dev->gadget.speed) {
+ case USB_SPEED_SUPER:
+ reg += 2;
+ break;
+ case USB_SPEED_FULL:
+ reg += 1;
+ break;
+ case USB_SPEED_HIGH:
+ default:
+ break;
+ }
+ } else {
reg = (ep->num + 1) * 0x10;
if (ep->dev->gadget.speed != USB_SPEED_HIGH)
reg += 1;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index fd73a3ea07c2..b86a6f03592e 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -413,9 +413,10 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
if (!driver->udc_name || strcmp(driver->udc_name,
dev_name(&udc->dev)) == 0) {
ret = udc_bind_to_driver(udc, driver);
+ if (ret != -EPROBE_DEFER)
+ list_del(&driver->pending);
if (ret)
goto err4;
- list_del(&driver->pending);
break;
}
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 795a45b1b25b..58487a473521 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -662,7 +662,7 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
}
- channel->desired_mode = mode;
+ channel->desired_mode = *mode;
musb_writew(epio, MUSB_TXCSR, csr);
return 0;
@@ -2003,10 +2003,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
qh->offset,
urb->transfer_buffer_length);
- done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh,
- urb, xfer_len,
- iso_err);
- if (done)
+ if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
+ xfer_len, iso_err))
goto finish;
else
dev_err(musb->controller, "error: rx_dma failed\n");
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 970a30e155cb..72b387d592c2 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -757,14 +757,8 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
otg->host = host;
	dev_dbg(otg->usb_phy->dev, "host driver registered w/ transceiver\n");
- /*
- * Kick the state machine work, if peripheral is not supported
- * or peripheral is already registered with us.
- */
- if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
- }
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ schedule_work(&motg->sm_work);
return 0;
}
@@ -827,14 +821,8 @@ static int msm_otg_set_peripheral(struct usb_otg *otg,
dev_dbg(otg->usb_phy->dev,
"peripheral driver registered w/ tranceiver\n");
- /*
- * Kick the state machine work, if host is not supported
- * or host is already registered with us.
- */
- if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
- }
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ schedule_work(&motg->sm_work);
return 0;
}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index f612dda9c977..56ecb8b5115d 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -475,22 +475,6 @@ config USB_SERIAL_MOS7840
To compile this driver as a module, choose M here: the
module will be called mos7840. If unsure, choose N.
-config USB_SERIAL_MXUPORT11
- tristate "USB Moxa UPORT 11x0 Serial Driver"
- ---help---
- Say Y here if you want to use a MOXA UPort 11x0 Serial hub.
-
- This driver supports:
-
- - UPort 1110 : 1 port RS-232 USB to Serial Hub.
- - UPort 1130 : 1 port RS-422/485 USB to Serial Hub.
- - UPort 1130I : 1 port RS-422/485 USB to Serial Hub with Isolation.
- - UPort 1150 : 1 port RS-232/422/485 USB to Serial Hub.
- - UPort 1150I : 1 port RS-232/422/485 USB to Serial Hub with Isolation.
-
- To compile this driver as a module, choose M here: the
- module will be called mxu11x0.
-
config USB_SERIAL_MXUPORT
tristate "USB Moxa UPORT Serial Driver"
---help---
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index f3fa5e53702d..349d9df0895f 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_USB_SERIAL_METRO) += metro-usb.o
obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o
obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o
obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o
-obj-$(CONFIG_USB_SERIAL_MXUPORT11) += mxu11x0.o
obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o
obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o
obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 987813b8a7f9..73a366de5102 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -163,6 +163,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
diff --git a/drivers/usb/serial/mxu11x0.c b/drivers/usb/serial/mxu11x0.c
deleted file mode 100644
index 619607323bfd..000000000000
--- a/drivers/usb/serial/mxu11x0.c
+++ /dev/null
@@ -1,1006 +0,0 @@
-/*
- * USB Moxa UPORT 11x0 Serial Driver
- *
- * Copyright (C) 2007 MOXA Technologies Co., Ltd.
- * Copyright (C) 2015 Mathieu Othacehe <m.othacehe@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
- * Supports the following Moxa USB to serial converters:
- * UPort 1110, 1 port RS-232 USB to Serial Hub.
- * UPort 1130, 1 port RS-422/485 USB to Serial Hub.
- * UPort 1130I, 1 port RS-422/485 USB to Serial Hub with isolation
- * protection.
- * UPort 1150, 1 port RS-232/422/485 USB to Serial Hub.
- * UPort 1150I, 1 port RS-232/422/485 USB to Serial Hub with isolation
- * protection.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/firmware.h>
-#include <linux/jiffies.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-
-/* Vendor and product ids */
-#define MXU1_VENDOR_ID 0x110a
-#define MXU1_1110_PRODUCT_ID 0x1110
-#define MXU1_1130_PRODUCT_ID 0x1130
-#define MXU1_1150_PRODUCT_ID 0x1150
-#define MXU1_1151_PRODUCT_ID 0x1151
-#define MXU1_1131_PRODUCT_ID 0x1131
-
-/* Commands */
-#define MXU1_GET_VERSION 0x01
-#define MXU1_GET_PORT_STATUS 0x02
-#define MXU1_GET_PORT_DEV_INFO 0x03
-#define MXU1_GET_CONFIG 0x04
-#define MXU1_SET_CONFIG 0x05
-#define MXU1_OPEN_PORT 0x06
-#define MXU1_CLOSE_PORT 0x07
-#define MXU1_START_PORT 0x08
-#define MXU1_STOP_PORT 0x09
-#define MXU1_TEST_PORT 0x0A
-#define MXU1_PURGE_PORT 0x0B
-#define MXU1_RESET_EXT_DEVICE 0x0C
-#define MXU1_GET_OUTQUEUE 0x0D
-#define MXU1_WRITE_DATA 0x80
-#define MXU1_READ_DATA 0x81
-#define MXU1_REQ_TYPE_CLASS 0x82
-
-/* Module identifiers */
-#define MXU1_I2C_PORT 0x01
-#define MXU1_IEEE1284_PORT 0x02
-#define MXU1_UART1_PORT 0x03
-#define MXU1_UART2_PORT 0x04
-#define MXU1_RAM_PORT 0x05
-
-/* Modem status */
-#define MXU1_MSR_DELTA_CTS 0x01
-#define MXU1_MSR_DELTA_DSR 0x02
-#define MXU1_MSR_DELTA_RI 0x04
-#define MXU1_MSR_DELTA_CD 0x08
-#define MXU1_MSR_CTS 0x10
-#define MXU1_MSR_DSR 0x20
-#define MXU1_MSR_RI 0x40
-#define MXU1_MSR_CD 0x80
-#define MXU1_MSR_DELTA_MASK 0x0F
-#define MXU1_MSR_MASK 0xF0
-
-/* Line status */
-#define MXU1_LSR_OVERRUN_ERROR 0x01
-#define MXU1_LSR_PARITY_ERROR 0x02
-#define MXU1_LSR_FRAMING_ERROR 0x04
-#define MXU1_LSR_BREAK 0x08
-#define MXU1_LSR_ERROR 0x0F
-#define MXU1_LSR_RX_FULL 0x10
-#define MXU1_LSR_TX_EMPTY 0x20
-
-/* Modem control */
-#define MXU1_MCR_LOOP 0x04
-#define MXU1_MCR_DTR 0x10
-#define MXU1_MCR_RTS 0x20
-
-/* Mask settings */
-#define MXU1_UART_ENABLE_RTS_IN 0x0001
-#define MXU1_UART_DISABLE_RTS 0x0002
-#define MXU1_UART_ENABLE_PARITY_CHECKING 0x0008
-#define MXU1_UART_ENABLE_DSR_OUT 0x0010
-#define MXU1_UART_ENABLE_CTS_OUT 0x0020
-#define MXU1_UART_ENABLE_X_OUT 0x0040
-#define MXU1_UART_ENABLE_XA_OUT 0x0080
-#define MXU1_UART_ENABLE_X_IN 0x0100
-#define MXU1_UART_ENABLE_DTR_IN 0x0800
-#define MXU1_UART_DISABLE_DTR 0x1000
-#define MXU1_UART_ENABLE_MS_INTS 0x2000
-#define MXU1_UART_ENABLE_AUTO_START_DMA 0x4000
-#define MXU1_UART_SEND_BREAK_SIGNAL 0x8000
-
-/* Parity */
-#define MXU1_UART_NO_PARITY 0x00
-#define MXU1_UART_ODD_PARITY 0x01
-#define MXU1_UART_EVEN_PARITY 0x02
-#define MXU1_UART_MARK_PARITY 0x03
-#define MXU1_UART_SPACE_PARITY 0x04
-
-/* Stop bits */
-#define MXU1_UART_1_STOP_BITS 0x00
-#define MXU1_UART_1_5_STOP_BITS 0x01
-#define MXU1_UART_2_STOP_BITS 0x02
-
-/* Bits per character */
-#define MXU1_UART_5_DATA_BITS 0x00
-#define MXU1_UART_6_DATA_BITS 0x01
-#define MXU1_UART_7_DATA_BITS 0x02
-#define MXU1_UART_8_DATA_BITS 0x03
-
-/* Operation modes */
-#define MXU1_UART_232 0x00
-#define MXU1_UART_485_RECEIVER_DISABLED 0x01
-#define MXU1_UART_485_RECEIVER_ENABLED 0x02
-
-/* Pipe transfer mode and timeout */
-#define MXU1_PIPE_MODE_CONTINUOUS 0x01
-#define MXU1_PIPE_MODE_MASK 0x03
-#define MXU1_PIPE_TIMEOUT_MASK 0x7C
-#define MXU1_PIPE_TIMEOUT_ENABLE 0x80
-
-/* Config struct */
-struct mxu1_uart_config {
- __be16 wBaudRate;
- __be16 wFlags;
- u8 bDataBits;
- u8 bParity;
- u8 bStopBits;
- char cXon;
- char cXoff;
- u8 bUartMode;
-} __packed;
-
-/* Purge modes */
-#define MXU1_PURGE_OUTPUT 0x00
-#define MXU1_PURGE_INPUT 0x80
-
-/* Read/Write data */
-#define MXU1_RW_DATA_ADDR_SFR 0x10
-#define MXU1_RW_DATA_ADDR_IDATA 0x20
-#define MXU1_RW_DATA_ADDR_XDATA 0x30
-#define MXU1_RW_DATA_ADDR_CODE 0x40
-#define MXU1_RW_DATA_ADDR_GPIO 0x50
-#define MXU1_RW_DATA_ADDR_I2C 0x60
-#define MXU1_RW_DATA_ADDR_FLASH 0x70
-#define MXU1_RW_DATA_ADDR_DSP 0x80
-
-#define MXU1_RW_DATA_UNSPECIFIED 0x00
-#define MXU1_RW_DATA_BYTE 0x01
-#define MXU1_RW_DATA_WORD 0x02
-#define MXU1_RW_DATA_DOUBLE_WORD 0x04
-
-struct mxu1_write_data_bytes {
- u8 bAddrType;
- u8 bDataType;
- u8 bDataCounter;
- __be16 wBaseAddrHi;
- __be16 wBaseAddrLo;
- u8 bData[0];
-} __packed;
-
-/* Interrupt codes */
-#define MXU1_CODE_HARDWARE_ERROR 0xFF
-#define MXU1_CODE_DATA_ERROR 0x03
-#define MXU1_CODE_MODEM_STATUS 0x04
-
-static inline int mxu1_get_func_from_code(unsigned char code)
-{
- return code & 0x0f;
-}
-
-/* Download firmware max packet size */
-#define MXU1_DOWNLOAD_MAX_PACKET_SIZE 64
-
-/* Firmware image header */
-struct mxu1_firmware_header {
- __le16 wLength;
- u8 bCheckSum;
-} __packed;
-
-#define MXU1_UART_BASE_ADDR 0xFFA0
-#define MXU1_UART_OFFSET_MCR 0x0004
-
-#define MXU1_BAUD_BASE 923077
-
-#define MXU1_TRANSFER_TIMEOUT 2
-#define MXU1_DOWNLOAD_TIMEOUT 1000
-#define MXU1_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */
-
-struct mxu1_port {
- u8 msr;
- u8 mcr;
- u8 uart_mode;
- spinlock_t spinlock; /* Protects msr */
- struct mutex mutex; /* Protects mcr */
- bool send_break;
-};
-
-struct mxu1_device {
- u16 mxd_model;
-};
-
-static const struct usb_device_id mxu1_idtable[] = {
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
- { }
-};
-
-MODULE_DEVICE_TABLE(usb, mxu1_idtable);
-
-/* Write the given buffer out to the control pipe. */
-static int mxu1_send_ctrl_data_urb(struct usb_serial *serial,
- u8 request,
- u16 value, u16 index,
- void *data, size_t size)
-{
- int status;
-
- status = usb_control_msg(serial->dev,
- usb_sndctrlpipe(serial->dev, 0),
- request,
- (USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_DEVICE), value, index,
- data, size,
- USB_CTRL_SET_TIMEOUT);
- if (status < 0) {
- dev_err(&serial->interface->dev,
- "%s - usb_control_msg failed: %d\n",
- __func__, status);
- return status;
- }
-
- if (status != size) {
- dev_err(&serial->interface->dev,
- "%s - short write (%d / %zd)\n",
- __func__, status, size);
- return -EIO;
- }
-
- return 0;
-}
-
-/* Send a vendor request without any data */
-static int mxu1_send_ctrl_urb(struct usb_serial *serial,
- u8 request, u16 value, u16 index)
-{
- return mxu1_send_ctrl_data_urb(serial, request, value, index,
- NULL, 0);
-}
-
-static int mxu1_download_firmware(struct usb_serial *serial,
- const struct firmware *fw_p)
-{
- int status = 0;
- int buffer_size;
- int pos;
- int len;
- int done;
- u8 cs = 0;
- u8 *buffer;
- struct usb_device *dev = serial->dev;
- struct mxu1_firmware_header *header;
- unsigned int pipe;
-
- pipe = usb_sndbulkpipe(dev, serial->port[0]->bulk_out_endpointAddress);
-
- buffer_size = fw_p->size + sizeof(*header);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- memcpy(buffer, fw_p->data, fw_p->size);
- memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
-
- for (pos = sizeof(*header); pos < buffer_size; pos++)
- cs = (u8)(cs + buffer[pos]);
-
- header = (struct mxu1_firmware_header *)buffer;
- header->wLength = cpu_to_le16(buffer_size - sizeof(*header));
- header->bCheckSum = cs;
-
- dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__);
-
- for (pos = 0; pos < buffer_size; pos += done) {
- len = min(buffer_size - pos, MXU1_DOWNLOAD_MAX_PACKET_SIZE);
-
- status = usb_bulk_msg(dev, pipe, buffer + pos, len, &done,
- MXU1_DOWNLOAD_TIMEOUT);
- if (status)
- break;
- }
-
- kfree(buffer);
-
- if (status) {
- dev_err(&dev->dev, "failed to download firmware: %d\n", status);
- return status;
- }
-
- msleep_interruptible(100);
- usb_reset_device(dev);
-
- dev_dbg(&dev->dev, "%s - download successful\n", __func__);
-
- return 0;
-}
-
-static int mxu1_port_probe(struct usb_serial_port *port)
-{
- struct mxu1_port *mxport;
- struct mxu1_device *mxdev;
-
- if (!port->interrupt_in_urb) {
- dev_err(&port->dev, "no interrupt urb\n");
- return -ENODEV;
- }
-
- mxport = kzalloc(sizeof(struct mxu1_port), GFP_KERNEL);
- if (!mxport)
- return -ENOMEM;
-
- spin_lock_init(&mxport->spinlock);
- mutex_init(&mxport->mutex);
-
- mxdev = usb_get_serial_data(port->serial);
-
- switch (mxdev->mxd_model) {
- case MXU1_1110_PRODUCT_ID:
- case MXU1_1150_PRODUCT_ID:
- case MXU1_1151_PRODUCT_ID:
- mxport->uart_mode = MXU1_UART_232;
- break;
- case MXU1_1130_PRODUCT_ID:
- case MXU1_1131_PRODUCT_ID:
- mxport->uart_mode = MXU1_UART_485_RECEIVER_DISABLED;
- break;
- }
-
- usb_set_serial_port_data(port, mxport);
-
- port->port.closing_wait =
- msecs_to_jiffies(MXU1_DEFAULT_CLOSING_WAIT * 10);
- port->port.drain_delay = 1;
-
- return 0;
-}
-
-static int mxu1_port_remove(struct usb_serial_port *port)
-{
- struct mxu1_port *mxport;
-
- mxport = usb_get_serial_port_data(port);
- kfree(mxport);
-
- return 0;
-}
-
-static int mxu1_startup(struct usb_serial *serial)
-{
- struct mxu1_device *mxdev;
- struct usb_device *dev = serial->dev;
- struct usb_host_interface *cur_altsetting;
- char fw_name[32];
- const struct firmware *fw_p = NULL;
- int err;
-
- dev_dbg(&serial->interface->dev, "%s - product 0x%04X, num configurations %d, configuration value %d\n",
- __func__, le16_to_cpu(dev->descriptor.idProduct),
- dev->descriptor.bNumConfigurations,
- dev->actconfig->desc.bConfigurationValue);
-
- /* create device structure */
- mxdev = kzalloc(sizeof(struct mxu1_device), GFP_KERNEL);
- if (!mxdev)
- return -ENOMEM;
-
- usb_set_serial_data(serial, mxdev);
-
- mxdev->mxd_model = le16_to_cpu(dev->descriptor.idProduct);
-
- cur_altsetting = serial->interface->cur_altsetting;
-
- /* if we have only 1 configuration, download firmware */
- if (cur_altsetting->desc.bNumEndpoints == 1) {
-
- snprintf(fw_name,
- sizeof(fw_name),
- "moxa/moxa-%04x.fw",
- mxdev->mxd_model);
-
- err = request_firmware(&fw_p, fw_name, &serial->interface->dev);
- if (err) {
- dev_err(&serial->interface->dev, "failed to request firmware: %d\n",
- err);
- goto err_free_mxdev;
- }
-
- err = mxu1_download_firmware(serial, fw_p);
- if (err)
- goto err_release_firmware;
-
- /* device is being reset */
- err = -ENODEV;
- goto err_release_firmware;
- }
-
- return 0;
-
-err_release_firmware:
- release_firmware(fw_p);
-err_free_mxdev:
- kfree(mxdev);
-
- return err;
-}
-
-static void mxu1_release(struct usb_serial *serial)
-{
- struct mxu1_device *mxdev;
-
- mxdev = usb_get_serial_data(serial);
- kfree(mxdev);
-}
-
-static int mxu1_write_byte(struct usb_serial_port *port, u32 addr,
- u8 mask, u8 byte)
-{
- int status;
- size_t size;
- struct mxu1_write_data_bytes *data;
-
- dev_dbg(&port->dev, "%s - addr 0x%08X, mask 0x%02X, byte 0x%02X\n",
- __func__, addr, mask, byte);
-
- size = sizeof(struct mxu1_write_data_bytes) + 2;
- data = kzalloc(size, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->bAddrType = MXU1_RW_DATA_ADDR_XDATA;
- data->bDataType = MXU1_RW_DATA_BYTE;
- data->bDataCounter = 1;
- data->wBaseAddrHi = cpu_to_be16(addr >> 16);
- data->wBaseAddrLo = cpu_to_be16(addr);
- data->bData[0] = mask;
- data->bData[1] = byte;
-
- status = mxu1_send_ctrl_data_urb(port->serial, MXU1_WRITE_DATA, 0,
- MXU1_RAM_PORT, data, size);
- if (status < 0)
- dev_err(&port->dev, "%s - failed: %d\n", __func__, status);
-
- kfree(data);
-
- return status;
-}
-
-static int mxu1_set_mcr(struct usb_serial_port *port, unsigned int mcr)
-{
- int status;
-
- status = mxu1_write_byte(port,
- MXU1_UART_BASE_ADDR + MXU1_UART_OFFSET_MCR,
- MXU1_MCR_RTS | MXU1_MCR_DTR | MXU1_MCR_LOOP,
- mcr);
- return status;
-}
-
-static void mxu1_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
-{
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- struct mxu1_uart_config *config;
- tcflag_t cflag, iflag;
- speed_t baud;
- int status;
- unsigned int mcr;
-
- cflag = tty->termios.c_cflag;
- iflag = tty->termios.c_iflag;
-
- if (old_termios &&
- !tty_termios_hw_change(&tty->termios, old_termios) &&
- tty->termios.c_iflag == old_termios->c_iflag) {
- dev_dbg(&port->dev, "%s - nothing to change\n", __func__);
- return;
- }
-
- dev_dbg(&port->dev,
- "%s - cflag 0x%08x, iflag 0x%08x\n", __func__, cflag, iflag);
-
- if (old_termios) {
- dev_dbg(&port->dev, "%s - old cflag 0x%08x, old iflag 0x%08x\n",
- __func__,
- old_termios->c_cflag,
- old_termios->c_iflag);
- }
-
- config = kzalloc(sizeof(*config), GFP_KERNEL);
- if (!config)
- return;
-
- /* these flags must be set */
- config->wFlags |= MXU1_UART_ENABLE_MS_INTS;
- config->wFlags |= MXU1_UART_ENABLE_AUTO_START_DMA;
- if (mxport->send_break)
- config->wFlags |= MXU1_UART_SEND_BREAK_SIGNAL;
- config->bUartMode = mxport->uart_mode;
-
- switch (C_CSIZE(tty)) {
- case CS5:
- config->bDataBits = MXU1_UART_5_DATA_BITS;
- break;
- case CS6:
- config->bDataBits = MXU1_UART_6_DATA_BITS;
- break;
- case CS7:
- config->bDataBits = MXU1_UART_7_DATA_BITS;
- break;
- default:
- case CS8:
- config->bDataBits = MXU1_UART_8_DATA_BITS;
- break;
- }
-
- if (C_PARENB(tty)) {
- config->wFlags |= MXU1_UART_ENABLE_PARITY_CHECKING;
- if (C_CMSPAR(tty)) {
- if (C_PARODD(tty))
- config->bParity = MXU1_UART_MARK_PARITY;
- else
- config->bParity = MXU1_UART_SPACE_PARITY;
- } else {
- if (C_PARODD(tty))
- config->bParity = MXU1_UART_ODD_PARITY;
- else
- config->bParity = MXU1_UART_EVEN_PARITY;
- }
- } else {
- config->bParity = MXU1_UART_NO_PARITY;
- }
-
- if (C_CSTOPB(tty))
- config->bStopBits = MXU1_UART_2_STOP_BITS;
- else
- config->bStopBits = MXU1_UART_1_STOP_BITS;
-
- if (C_CRTSCTS(tty)) {
- /* RTS flow control must be off to drop RTS for baud rate B0 */
- if (C_BAUD(tty) != B0)
- config->wFlags |= MXU1_UART_ENABLE_RTS_IN;
- config->wFlags |= MXU1_UART_ENABLE_CTS_OUT;
- }
-
- if (I_IXOFF(tty) || I_IXON(tty)) {
- config->cXon = START_CHAR(tty);
- config->cXoff = STOP_CHAR(tty);
-
- if (I_IXOFF(tty))
- config->wFlags |= MXU1_UART_ENABLE_X_IN;
-
- if (I_IXON(tty))
- config->wFlags |= MXU1_UART_ENABLE_X_OUT;
- }
-
- baud = tty_get_baud_rate(tty);
- if (!baud)
- baud = 9600;
- config->wBaudRate = MXU1_BAUD_BASE / baud;
-
- dev_dbg(&port->dev, "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
- __func__, baud, config->wBaudRate, config->wFlags,
- config->bDataBits, config->bParity, config->bStopBits,
- config->cXon, config->cXoff, config->bUartMode);
-
- cpu_to_be16s(&config->wBaudRate);
- cpu_to_be16s(&config->wFlags);
-
- status = mxu1_send_ctrl_data_urb(port->serial, MXU1_SET_CONFIG, 0,
- MXU1_UART1_PORT, config,
- sizeof(*config));
- if (status)
- dev_err(&port->dev, "cannot set config: %d\n", status);
-
- mutex_lock(&mxport->mutex);
- mcr = mxport->mcr;
-
- if (C_BAUD(tty) == B0)
- mcr &= ~(MXU1_MCR_DTR | MXU1_MCR_RTS);
- else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
- mcr |= MXU1_MCR_DTR | MXU1_MCR_RTS;
-
- status = mxu1_set_mcr(port, mcr);
- if (status)
- dev_err(&port->dev, "cannot set modem control: %d\n", status);
- else
- mxport->mcr = mcr;
-
- mutex_unlock(&mxport->mutex);
-
- kfree(config);
-}
-
-static int mxu1_get_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *ret_arg)
-{
- struct serial_struct ret_serial;
- unsigned cwait;
-
- if (!ret_arg)
- return -EFAULT;
-
- cwait = port->port.closing_wait;
- if (cwait != ASYNC_CLOSING_WAIT_NONE)
- cwait = jiffies_to_msecs(cwait) / 10;
-
- memset(&ret_serial, 0, sizeof(ret_serial));
-
- ret_serial.type = PORT_16550A;
- ret_serial.line = port->minor;
- ret_serial.port = 0;
- ret_serial.xmit_fifo_size = port->bulk_out_size;
- ret_serial.baud_base = MXU1_BAUD_BASE;
- ret_serial.close_delay = 5*HZ;
- ret_serial.closing_wait = cwait;
-
- if (copy_to_user(ret_arg, &ret_serial, sizeof(*ret_arg)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int mxu1_set_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *new_arg)
-{
- struct serial_struct new_serial;
- unsigned cwait;
-
- if (copy_from_user(&new_serial, new_arg, sizeof(new_serial)))
- return -EFAULT;
-
- cwait = new_serial.closing_wait;
- if (cwait != ASYNC_CLOSING_WAIT_NONE)
- cwait = msecs_to_jiffies(10 * new_serial.closing_wait);
-
- port->port.closing_wait = cwait;
-
- return 0;
-}
-
-static int mxu1_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
-
- switch (cmd) {
- case TIOCGSERIAL:
- return mxu1_get_serial_info(port,
- (struct serial_struct __user *)arg);
- case TIOCSSERIAL:
- return mxu1_set_serial_info(port,
- (struct serial_struct __user *)arg);
- }
-
- return -ENOIOCTLCMD;
-}
-
-static int mxu1_tiocmget(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- unsigned int result;
- unsigned int msr;
- unsigned int mcr;
- unsigned long flags;
-
- mutex_lock(&mxport->mutex);
- spin_lock_irqsave(&mxport->spinlock, flags);
-
- msr = mxport->msr;
- mcr = mxport->mcr;
-
- spin_unlock_irqrestore(&mxport->spinlock, flags);
- mutex_unlock(&mxport->mutex);
-
- result = ((mcr & MXU1_MCR_DTR) ? TIOCM_DTR : 0) |
- ((mcr & MXU1_MCR_RTS) ? TIOCM_RTS : 0) |
- ((mcr & MXU1_MCR_LOOP) ? TIOCM_LOOP : 0) |
- ((msr & MXU1_MSR_CTS) ? TIOCM_CTS : 0) |
- ((msr & MXU1_MSR_CD) ? TIOCM_CAR : 0) |
- ((msr & MXU1_MSR_RI) ? TIOCM_RI : 0) |
- ((msr & MXU1_MSR_DSR) ? TIOCM_DSR : 0);
-
- dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);
-
- return result;
-}
-
-static int mxu1_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- int err;
- unsigned int mcr;
-
- mutex_lock(&mxport->mutex);
- mcr = mxport->mcr;
-
- if (set & TIOCM_RTS)
- mcr |= MXU1_MCR_RTS;
- if (set & TIOCM_DTR)
- mcr |= MXU1_MCR_DTR;
- if (set & TIOCM_LOOP)
- mcr |= MXU1_MCR_LOOP;
-
- if (clear & TIOCM_RTS)
- mcr &= ~MXU1_MCR_RTS;
- if (clear & TIOCM_DTR)
- mcr &= ~MXU1_MCR_DTR;
- if (clear & TIOCM_LOOP)
- mcr &= ~MXU1_MCR_LOOP;
-
- err = mxu1_set_mcr(port, mcr);
- if (!err)
- mxport->mcr = mcr;
-
- mutex_unlock(&mxport->mutex);
-
- return err;
-}
-
-static void mxu1_break(struct tty_struct *tty, int break_state)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
-
- if (break_state == -1)
- mxport->send_break = true;
- else
- mxport->send_break = false;
-
- mxu1_set_termios(tty, port, NULL);
-}
-
-static int mxu1_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- struct usb_serial *serial = port->serial;
- int status;
- u16 open_settings;
-
- open_settings = (MXU1_PIPE_MODE_CONTINUOUS |
- MXU1_PIPE_TIMEOUT_ENABLE |
- (MXU1_TRANSFER_TIMEOUT << 2));
-
- mxport->msr = 0;
-
- status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
- if (status) {
- dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
- status);
- return status;
- }
-
- if (tty)
- mxu1_set_termios(tty, port, NULL);
-
- status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
- open_settings, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot send open command: %d\n", status);
- goto unlink_int_urb;
- }
-
- status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
- 0, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot send start command: %d\n", status);
- goto unlink_int_urb;
- }
-
- status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
- MXU1_PURGE_INPUT, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot clear input buffers: %d\n",
- status);
-
- goto unlink_int_urb;
- }
-
- status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
- MXU1_PURGE_OUTPUT, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot clear output buffers: %d\n",
- status);
-
- goto unlink_int_urb;
- }
-
- /*
- * reset the data toggle on the bulk endpoints to work around bug in
- * host controllers where things get out of sync some times
- */
- usb_clear_halt(serial->dev, port->write_urb->pipe);
- usb_clear_halt(serial->dev, port->read_urb->pipe);
-
- if (tty)
- mxu1_set_termios(tty, port, NULL);
-
- status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
- open_settings, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot send open command: %d\n", status);
- goto unlink_int_urb;
- }
-
- status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
- 0, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot send start command: %d\n", status);
- goto unlink_int_urb;
- }
-
- status = usb_serial_generic_open(tty, port);
- if (status)
- goto unlink_int_urb;
-
- return 0;
-
-unlink_int_urb:
- usb_kill_urb(port->interrupt_in_urb);
-
- return status;
-}
-
-static void mxu1_close(struct usb_serial_port *port)
-{
- int status;
-
- usb_serial_generic_close(port);
- usb_kill_urb(port->interrupt_in_urb);
-
- status = mxu1_send_ctrl_urb(port->serial, MXU1_CLOSE_PORT,
- 0, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "failed to send close port command: %d\n",
- status);
- }
-}
-
-static void mxu1_handle_new_msr(struct usb_serial_port *port, u8 msr)
-{
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- struct async_icount *icount;
- unsigned long flags;
-
- dev_dbg(&port->dev, "%s - msr 0x%02X\n", __func__, msr);
-
- spin_lock_irqsave(&mxport->spinlock, flags);
- mxport->msr = msr & MXU1_MSR_MASK;
- spin_unlock_irqrestore(&mxport->spinlock, flags);
-
- if (msr & MXU1_MSR_DELTA_MASK) {
- icount = &port->icount;
- if (msr & MXU1_MSR_DELTA_CTS)
- icount->cts++;
- if (msr & MXU1_MSR_DELTA_DSR)
- icount->dsr++;
- if (msr & MXU1_MSR_DELTA_CD)
- icount->dcd++;
- if (msr & MXU1_MSR_DELTA_RI)
- icount->rng++;
-
- wake_up_interruptible(&port->port.delta_msr_wait);
- }
-}
-
-static void mxu1_interrupt_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- unsigned char *data = urb->transfer_buffer;
- int length = urb->actual_length;
- int function;
- int status;
- u8 msr;
-
- switch (urb->status) {
- case 0:
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- dev_dbg(&port->dev, "%s - urb shutting down: %d\n",
- __func__, urb->status);
- return;
- default:
- dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
- __func__, urb->status);
- goto exit;
- }
-
- if (length != 2) {
- dev_dbg(&port->dev, "%s - bad packet size: %d\n",
- __func__, length);
- goto exit;
- }
-
- if (data[0] == MXU1_CODE_HARDWARE_ERROR) {
- dev_err(&port->dev, "hardware error: %d\n", data[1]);
- goto exit;
- }
-
- function = mxu1_get_func_from_code(data[0]);
-
- dev_dbg(&port->dev, "%s - function %d, data 0x%02X\n",
- __func__, function, data[1]);
-
- switch (function) {
- case MXU1_CODE_DATA_ERROR:
- dev_dbg(&port->dev, "%s - DATA ERROR, data 0x%02X\n",
- __func__, data[1]);
- break;
-
- case MXU1_CODE_MODEM_STATUS:
- msr = data[1];
- mxu1_handle_new_msr(port, msr);
- break;
-
- default:
- dev_err(&port->dev, "unknown interrupt code: 0x%02X\n",
- data[1]);
- break;
- }
-
-exit:
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev, "resubmit interrupt urb failed: %d\n",
- status);
- }
-}
-
-static struct usb_serial_driver mxu11x0_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "mxu11x0",
- },
- .description = "MOXA UPort 11x0",
- .id_table = mxu1_idtable,
- .num_ports = 1,
- .port_probe = mxu1_port_probe,
- .port_remove = mxu1_port_remove,
- .attach = mxu1_startup,
- .release = mxu1_release,
- .open = mxu1_open,
- .close = mxu1_close,
- .ioctl = mxu1_ioctl,
- .set_termios = mxu1_set_termios,
- .tiocmget = mxu1_tiocmget,
- .tiocmset = mxu1_tiocmset,
- .tiocmiwait = usb_serial_generic_tiocmiwait,
- .get_icount = usb_serial_generic_get_icount,
- .break_ctl = mxu1_break,
- .read_int_callback = mxu1_interrupt_callback,
-};
-
-static struct usb_serial_driver *const serial_drivers[] = {
- &mxu11x0_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, mxu1_idtable);
-
-MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
-MODULE_DESCRIPTION("MOXA UPort 11x0 USB to Serial Hub Driver");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("moxa/moxa-1110.fw");
-MODULE_FIRMWARE("moxa/moxa-1130.fw");
-MODULE_FIRMWARE("moxa/moxa-1131.fw");
-MODULE_FIRMWARE("moxa/moxa-1150.fw");
-MODULE_FIRMWARE("moxa/moxa-1151.fw");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index db86e512e0fc..348e19834b83 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -270,6 +270,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_UE910_V2 0x1012
#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
+#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
@@ -315,6 +316,7 @@ static void option_instat_callback(struct urb *urb);
#define TOSHIBA_PRODUCT_G450 0x0d45
#define ALINK_VENDOR_ID 0x1e0e
+#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
#define ALINK_PRODUCT_PH300 0x9100
#define ALINK_PRODUCT_3GU 0x9200
@@ -607,6 +609,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
.reserved = BIT(3) | BIT(4),
};
+static const struct option_blacklist_info simcom_sim7100e_blacklist = {
+ .reserved = BIT(5) | BIT(6),
+};
+
static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2),
@@ -1122,9 +1128,13 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1176,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1645,6 +1657,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
+ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
},
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9919d2a9faf2..1bc6089b9008 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
- {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */
- {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */
+ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
+ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
+ {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
+ {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 2760a7ba3f30..8c80a48e3233 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
info.num_regions = VFIO_PCI_NUM_REGIONS;
info.num_irqs = VFIO_PCI_NUM_IRQS;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct pci_dev *pdev = vdev->pdev;
@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
return -EINVAL;
}
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
else
info.flags |= VFIO_IRQ_INFO_NORESIZE;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 418cdd9ba3f4..e65b142d3422 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data,
info.num_regions = vdev->num_regions;
info.num_irqs = vdev->num_irqs;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct vfio_region_info info;
@@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data,
info.size = vdev->regions[info.index].size;
info.flags = vdev->regions[info.index].flags;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
@@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data,
info.flags = vdev->irqs[info.index].flags;
info.count = vdev->irqs[info.index].count;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 6f1ea3dddbad..75b24e93cedb 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_IOMMU_MAP_DMA) {
struct vfio_iommu_type1_dma_map map;
@@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
if (ret)
return ret;
- return copy_to_user((void __user *)arg, &unmap, minsz);
+ return copy_to_user((void __user *)arg, &unmap, minsz) ?
+ -EFAULT : 0;
}
return -ENOTTY;
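The vfio hunks above all fix the same pattern: copy_to_user() returns the number of bytes it failed to copy, so returning its result directly made these ioctls report a positive byte count instead of an error. The fix maps any nonzero remainder to -EFAULT and success to 0. A user-space sketch of that mapping, with a stand-in copy helper because copy_to_user() is kernel-only; illustration only:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_to_user(): returns the number of bytes NOT copied. */
static unsigned long copy_out(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);	/* pretend the whole copy succeeded */
	return 0;
}

static long info_ioctl(void *uarg, const void *info, unsigned long minsz)
{
	/* The fix: nonzero remainder becomes -EFAULT, success becomes 0. */
	return copy_out(uarg, info, minsz) ? -EFAULT : 0;
}

int main(void)
{
	int info = 42, out = 0;
	long ret = info_ioctl(&out, &info, sizeof(out));

	printf("ret=%ld out=%d\n", ret, out);	/* ret=0 out=42 */
	return 0;
}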
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ad2146a9ab2d..236553e81027 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1156,6 +1156,8 @@ int vhost_init_used(struct vhost_virtqueue *vq)
{
__virtio16 last_used_idx;
int r;
+ bool is_le = vq->is_le;
+
if (!vq->private_data) {
vq->is_le = virtio_legacy_is_little_endian();
return 0;
@@ -1165,15 +1167,20 @@ int vhost_init_used(struct vhost_virtqueue *vq)
r = vhost_update_used_flags(vq);
if (r)
- return r;
+ goto err;
vq->signalled_used_valid = false;
- if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx))
- return -EFAULT;
+ if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
+ r = -EFAULT;
+ goto err;
+ }
r = __get_user(last_used_idx, &vq->used->idx);
if (r)
- return r;
+ goto err;
vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
return 0;
+err:
+ vq->is_le = is_le;
+ return r;
}
EXPORT_SYMBOL_GPL(vhost_init_used);
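The vhost change above snapshots vq->is_le on entry and restores it on every failure path, so a failed vhost_init_used() no longer leaves the ring's endianness flag flipped. A simplified save-and-restore sketch with made-up types, not the driver's real structures; illustration only:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_vq { bool is_le; };	/* hypothetical stand-in for vhost_virtqueue */

static int init_used(struct fake_vq *vq, int simulated_err)
{
	bool is_le = vq->is_le;		/* snapshot the current setting */

	vq->is_le = true;		/* tentatively switch endianness */
	if (simulated_err) {
		vq->is_le = is_le;	/* roll back on the error path */
		return simulated_err;
	}
	return 0;
}

int main(void)
{
	struct fake_vq vq = { .is_le = false };

	init_used(&vq, -EFAULT);
	printf("is_le=%d\n", vq.is_le);	/* 0: restored after the failure */
	return 0;
}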
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 92f394927f24..6e92917ba77a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
}
if (!err) {
+ ops->cur_blink_jiffies = HZ / 5;
info->fbcon_par = ops;
if (vc)
@@ -956,6 +957,7 @@ static const char *fbcon_startup(void)
ops->currcon = -1;
ops->graphics = 1;
ops->cur_rotate = -1;
+ ops->cur_blink_jiffies = HZ / 5;
info->fbcon_par = ops;
p->con_rotate = initial_rotation;
set_blitting_type(vc, info);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index c0c11fad4611..7760fc1a2218 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -679,7 +679,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
pci_read_config_dword(pci_dev,
notify + offsetof(struct virtio_pci_notify_cap,
- cap.length),
+ cap.offset),
&notify_offset);
	/* We don't know how many VQs we'll map ahead of time.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0f6d8515ba4f..80825a7e8e48 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1569,6 +1569,17 @@ config WATCHDOG_RIO
machines. The watchdog timeout period is normally one minute but
can be changed with a boot-time parameter.
+config WATCHDOG_SUN4V
+ tristate "Sun4v Watchdog support"
+ select WATCHDOG_CORE
+ depends on SPARC64
+ help
+ Say Y here to support the hypervisor watchdog capability embedded
+ in the SPARC sun4v architecture.
+
+ To compile this driver as a module, choose M here. The module will
+ be called sun4v_wdt.
+
# XTENSA Architecture
# Xen Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index f566753256ab..f6a6a387c6c7 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -179,6 +179,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
obj-$(CONFIG_WATCHDOG_RIO) += riowd.o
obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
+obj-$(CONFIG_WATCHDOG_SUN4V) += sun4v_wdt.o
# XTENSA Architecture
diff --git a/drivers/watchdog/sun4v_wdt.c b/drivers/watchdog/sun4v_wdt.c
new file mode 100644
index 000000000000..1467fe50a76f
--- /dev/null
+++ b/drivers/watchdog/sun4v_wdt.c
@@ -0,0 +1,191 @@
+/*
+ * sun4v watchdog timer
+ * (c) Copyright 2016 Oracle Corporation
+ *
+ * Implement a simple watchdog driver using the built-in sun4v hypervisor
+ * watchdog support. If time expires, the hypervisor stops or bounces
+ * the guest domain.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/watchdog.h>
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#define WDT_TIMEOUT 60
+#define WDT_MAX_TIMEOUT 31536000
+#define WDT_MIN_TIMEOUT 1
+#define WDT_DEFAULT_RESOLUTION_MS 1000 /* 1 second */
+
+static unsigned int timeout;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+ __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int sun4v_wdt_stop(struct watchdog_device *wdd)
+{
+ sun4v_mach_set_watchdog(0, NULL);
+
+ return 0;
+}
+
+static int sun4v_wdt_ping(struct watchdog_device *wdd)
+{
+ int hverr;
+
+ /*
+ * HV watchdog timer will round up the timeout
+ * passed in to the nearest multiple of the
+ * watchdog resolution in milliseconds.
+ */
+ hverr = sun4v_mach_set_watchdog(wdd->timeout * 1000, NULL);
+ if (hverr == HV_EINVAL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sun4v_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->timeout = timeout;
+
+ return 0;
+}
+
+static const struct watchdog_info sun4v_wdt_ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .identity = "sun4v hypervisor watchdog",
+ .firmware_version = 0,
+};
+
+static struct watchdog_ops sun4v_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sun4v_wdt_ping,
+ .stop = sun4v_wdt_stop,
+ .ping = sun4v_wdt_ping,
+ .set_timeout = sun4v_wdt_set_timeout,
+};
+
+static struct watchdog_device wdd = {
+ .info = &sun4v_wdt_ident,
+ .ops = &sun4v_wdt_ops,
+ .min_timeout = WDT_MIN_TIMEOUT,
+ .max_timeout = WDT_MAX_TIMEOUT,
+ .timeout = WDT_TIMEOUT,
+};
+
+static int __init sun4v_wdt_init(void)
+{
+ struct mdesc_handle *handle;
+ u64 node;
+ const u64 *value;
+ int err = 0;
+ unsigned long major = 1, minor = 1;
+
+ /*
+ * There are 2 properties that can be set from the control
+ * domain for the watchdog.
+ * watchdog-resolution
+ * watchdog-max-timeout
+ *
+ * We can expect a handle to be returned; otherwise something
+ * is seriously wrong, so returning -ENODEV here is correct.
+ */
+
+ handle = mdesc_grab();
+ if (!handle)
+ return -ENODEV;
+
+ node = mdesc_node_by_name(handle, MDESC_NODE_NULL, "platform");
+ err = -ENODEV;
+ if (node == MDESC_NODE_NULL)
+ goto out_release;
+
+ /*
+ * This is a safe way to verify that we are on the right
+ * platform.
+ */
+ if (sun4v_hvapi_register(HV_GRP_CORE, major, &minor))
+ goto out_hv_unreg;
+
+ /* Allow value of watchdog-resolution up to 1s (default) */
+ value = mdesc_get_property(handle, node, "watchdog-resolution", NULL);
+ err = -EINVAL;
+ if (value) {
+ if (*value == 0 ||
+ *value > WDT_DEFAULT_RESOLUTION_MS)
+ goto out_hv_unreg;
+ }
+
+ value = mdesc_get_property(handle, node, "watchdog-max-timeout", NULL);
+ if (value) {
+ /*
+ * If the property value (in ms) is smaller than
+ * min_timeout, return -EINVAL.
+ */
+ if (*value < wdd.min_timeout * 1000)
+ goto out_hv_unreg;
+
+ /*
+ * If the property value is smaller than the
+ * default max_timeout, set the watchdog max_timeout to
+ * the property value in seconds.
+ */
+ if (*value < wdd.max_timeout * 1000)
+ wdd.max_timeout = *value / 1000;
+ }
+
+ watchdog_init_timeout(&wdd, timeout, NULL);
+
+ watchdog_set_nowayout(&wdd, nowayout);
+
+ err = watchdog_register_device(&wdd);
+ if (err)
+ goto out_hv_unreg;
+
+ pr_info("initialized (timeout=%ds, nowayout=%d)\n",
+ wdd.timeout, nowayout);
+
+ mdesc_release(handle);
+
+ return 0;
+
+out_hv_unreg:
+ sun4v_hvapi_unregister(HV_GRP_CORE);
+
+out_release:
+ mdesc_release(handle);
+ return err;
+}
+
+static void __exit sun4v_wdt_exit(void)
+{
+ sun4v_hvapi_unregister(HV_GRP_CORE);
+ watchdog_unregister_device(&wdd);
+}
+
+module_init(sun4v_wdt_init);
+module_exit(sun4v_wdt_exit);
+
+MODULE_AUTHOR("Wim Coekaerts <wim.coekaerts@oracle.com>");
+MODULE_DESCRIPTION("sun4v watchdog driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 73dafdc494aa..fb0221434f81 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
/*
* PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
* to access the BARs where the MSI-X entries reside.
+ * For VF devices, however, it is the parent PF that needs to be checked.
*/
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
return -ENXIO;
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
struct xen_pcibk_dev_data *dev_data = NULL;
struct xen_pci_op *op = &pdev->op;
int test_intx = 0;
+#ifdef CONFIG_PCI_MSI
+ unsigned int nr = 0;
+#endif
*op = pdev->sh_info->op;
barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
op->err = xen_pcibk_disable_msi(pdev, dev, op);
break;
case XEN_PCI_OP_enable_msix:
+ nr = op->value;
op->err = xen_pcibk_enable_msix(pdev, dev, op);
break;
case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
unsigned int i;
- for (i = 0; i < op->value; i++)
+ for (i = 0; i < nr; i++)
pdev->sh_info->op.msix_entries[i].vector =
op->msix_entries[i].vector;
}
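[Editorial note on the remaining hunks, not part of the patch text: op points into a page shared with the frontend, so op->value could change between the call to xen_pcibk_enable_msix() and the loop that copies the vectors back. Caching it in the local nr up front means the copy-back is bounded by the value that was actually validated. Below is a simplified, self-contained sketch of that snapshot-then-validate pattern; all names in it are hypothetical.]

/* Illustrative only: read a guest-controlled value exactly once,
 * validate the snapshot, and use only the snapshot afterwards.
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_MAX_VECTORS 64			/* hypothetical limit */

struct my_shared_op {
	u32 value;				/* written by the frontend */
	u32 vectors[MY_MAX_VECTORS];
};

static int my_copy_vectors(struct my_shared_op *shared, u32 *out,
			   unsigned int out_len)
{
	unsigned int nr = READ_ONCE(shared->value);	/* single read */
	unsigned int i;

	if (nr > MY_MAX_VECTORS || nr > out_len)
		return -EINVAL;

	/* Bound the loop by the snapshot, never by shared->value. */
	for (i = 0; i < nr; i++)
		out[i] = shared->vectors[i];

	return 0;
}
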
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ad4eb1024d1f..c46ee189466f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -849,15 +849,31 @@ static int scsiback_map(struct vscsibk_info *info)
}
/*
+ Check whether a translation entry is already present
+*/
+static struct v2p_entry *scsiback_chk_translation_entry(
+ struct vscsibk_info *info, struct ids_tuple *v)
+{
+ struct list_head *head = &(info->v2p_entry_lists);
+ struct v2p_entry *entry;
+
+ list_for_each_entry(entry, head, l)
+ if ((entry->v.chn == v->chn) &&
+ (entry->v.tgt == v->tgt) &&
+ (entry->v.lun == v->lun))
+ return entry;
+
+ return NULL;
+}
+
+/*
Add a new translation entry
*/
static int scsiback_add_translation_entry(struct vscsibk_info *info,
char *phy, struct ids_tuple *v)
{
int err = 0;
- struct v2p_entry *entry;
struct v2p_entry *new;
- struct list_head *head = &(info->v2p_entry_lists);
unsigned long flags;
char *lunp;
unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
spin_lock_irqsave(&info->v2p_lock, flags);
/* Check double assignment to identical virtual ID */
- list_for_each_entry(entry, head, l) {
- if ((entry->v.chn == v->chn) &&
- (entry->v.tgt == v->tgt) &&
- (entry->v.lun == v->lun)) {
- pr_warn("Virtual ID is already used. Assignment was not performed.\n");
- err = -EEXIST;
- goto out;
- }
-
+ if (scsiback_chk_translation_entry(info, v)) {
+ pr_warn("Virtual ID is already used. Assignment was not performed.\n");
+ err = -EEXIST;
+ goto out;
}
/* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
new->v = *v;
new->tpg = tpg;
new->lun = unpacked_lun;
- list_add_tail(&new->l, head);
+ list_add_tail(&new->l, &info->v2p_entry_lists);
out:
spin_unlock_irqrestore(&info->v2p_lock, flags);
out_free:
- mutex_lock(&tpg->tv_tpg_mutex);
- tpg->tv_tpg_fe_count--;
- mutex_unlock(&tpg->tv_tpg_mutex);
-
- if (err)
+ if (err) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_fe_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
kfree(new);
+ }
return err;
}
@@ -956,39 +967,40 @@ static void __scsiback_del_translation_entry(struct v2p_entry *entry)
}
/*
- Delete the translation entry specfied
+ Delete the translation entry specified
*/
static int scsiback_del_translation_entry(struct vscsibk_info *info,
struct ids_tuple *v)
{
struct v2p_entry *entry;
- struct list_head *head = &(info->v2p_entry_lists);
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&info->v2p_lock, flags);
/* Find out the translation entry specified */
- list_for_each_entry(entry, head, l) {
- if ((entry->v.chn == v->chn) &&
- (entry->v.tgt == v->tgt) &&
- (entry->v.lun == v->lun)) {
- goto found;
- }
- }
-
- spin_unlock_irqrestore(&info->v2p_lock, flags);
- return 1;
-
-found:
- /* Delete the translation entry specfied */
- __scsiback_del_translation_entry(entry);
+ entry = scsiback_chk_translation_entry(info, v);
+ if (entry)
+ __scsiback_del_translation_entry(entry);
+ else
+ ret = -ENOENT;
spin_unlock_irqrestore(&info->v2p_lock, flags);
- return 0;
+ return ret;
}
static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
char *phy, struct ids_tuple *vir, int try)
{
+ struct v2p_entry *entry;
+ unsigned long flags;
+
+ if (try) {
+ spin_lock_irqsave(&info->v2p_lock, flags);
+ entry = scsiback_chk_translation_entry(info, vir);
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+ if (entry)
+ return;
+ }
if (!scsiback_add_translation_entry(info, phy, vir)) {
if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
"%d", XenbusStateInitialised)) {
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 9433e46518c8..912b64edb42b 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -188,6 +188,8 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
if (len == 0)
return 0;
+ if (len > XENSTORE_PAYLOAD_MAX)
+ return -EINVAL;
rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
if (rb == NULL)