Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_processor.c2
-rw-r--r--drivers/acpi/acpica/Makefile1
-rw-r--r--drivers/acpi/acpica/acapps.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h13
-rw-r--r--drivers/acpi/acpica/aclocal.h7
-rw-r--r--drivers/acpi/acpica/acobject.h15
-rw-r--r--drivers/acpi/acpica/actables.h5
-rw-r--r--drivers/acpi/acpica/acutils.h9
-rw-r--r--drivers/acpi/acpica/dbdisply.c37
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c398
-rw-r--r--drivers/acpi/acpica/dsopcode.c9
-rw-r--r--drivers/acpi/acpica/dspkginit.c496
-rw-r--r--drivers/acpi/acpica/evgpeblk.c30
-rw-r--r--drivers/acpi/acpica/evxfgpe.c8
-rw-r--r--drivers/acpi/acpica/excreate.c62
-rw-r--r--drivers/acpi/acpica/exdump.c34
-rw-r--r--drivers/acpi/acpica/exmisc.c9
-rw-r--r--drivers/acpi/acpica/exoparg2.c3
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c9
-rw-r--r--drivers/acpi/acpica/nsaccess.c28
-rw-r--r--drivers/acpi/acpica/nsarguments.c21
-rw-r--r--drivers/acpi/acpica/nsinit.c14
-rw-r--r--drivers/acpi/acpica/nsnames.c9
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c10
-rw-r--r--drivers/acpi/acpica/psloop.c14
-rw-r--r--drivers/acpi/acpica/psobject.c26
-rw-r--r--drivers/acpi/acpica/rsxface.c7
-rw-r--r--drivers/acpi/acpica/tbdata.c230
-rw-r--r--drivers/acpi/acpica/tbinstal.c161
-rw-r--r--drivers/acpi/acpica/tbxface.c39
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/uthex.c4
-rw-r--r--drivers/acpi/acpica/utmath.c222
-rw-r--r--drivers/acpi/acpica/utmisc.c10
-rw-r--r--drivers/acpi/acpica/utobject.c5
-rw-r--r--drivers/acpi/acpica/utprint.c8
-rw-r--r--drivers/acpi/acpica/utresrc.c7
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utstrtoul64.c9
-rw-r--r--drivers/acpi/acpica/uttrack.c9
-rw-r--r--drivers/acpi/arm64/iort.c57
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/device_pm.c175
-rw-r--r--drivers/acpi/ec.c63
-rw-r--r--drivers/acpi/internal.h7
-rw-r--r--drivers/acpi/osi.c37
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c21
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/property.c8
-rw-r--r--drivers/acpi/resource.c82
-rw-r--r--drivers/acpi/sbs.c25
-rw-r--r--drivers/acpi/scan.c105
-rw-r--r--drivers/acpi/sleep.c2
-rw-r--r--drivers/acpi/spcr.c45
-rw-r--r--drivers/acpi/sysfs.c12
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/x86/apple.c141
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/ahci_da850.c8
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/base/firmware_class.c49
-rw-r--r--drivers/block/loop.c42
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/sunvdc.c61
-rw-r--r--drivers/block/virtio_blk.c16
-rw-r--r--drivers/block/xen-blkback/xenbus.c10
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/block/zram/zram_drv.c4
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/clocksource/Kconfig2
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/clocksource/em_sti.c11
-rw-r--r--drivers/clocksource/timer-of.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c3
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c10
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c8
-rw-r--r--drivers/crypto/ixp4xx_crypto.c6
-rw-r--r--drivers/dma-buf/sync_file.c5
-rw-r--r--drivers/dma/tegra210-adma.c4
-rw-r--r--drivers/firmware/efi/apple-properties.c5
-rw-r--r--drivers/gpio/gpio-mvebu.c2
-rw-r--r--drivers/gpio/gpiolib-sysfs.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c13
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c11
-rw-r--r--drivers/gpu/drm/drm_gem.c6
-rw-r--r--drivers/gpu/drm/drm_plane.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c27
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c11
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h14
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c38
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c11
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c4
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c15
-rw-r--r--drivers/gpu/drm/i915/intel_color.c1
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c7
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_vbt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c23
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h1
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c4
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c6
-rw-r--r--drivers/gpu/drm/msm/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c181
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c11
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c14
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c12
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c12
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c41
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h3
-rw-r--r--drivers/gpu/drm/stm/Kconfig1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c26
-rw-r--r--drivers/gpu/ipu-v3/Kconfig1
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c5
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c33
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c6
-rw-r--r--drivers/i2c/busses/i2c-ismt.c6
-rw-r--r--drivers/i2c/busses/i2c-simtec.c6
-rw-r--r--drivers/i2c/i2c-core-acpi.c19
-rw-r--r--drivers/i2c/i2c-core-base.c5
-rw-r--r--drivers/i2c/i2c-core.h9
-rw-r--r--drivers/i2c/muxes/Kconfig2
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c9
-rw-r--r--drivers/iio/accel/st_accel_core.c32
-rw-r--r--drivers/iio/adc/aspeed_adc.c26
-rw-r--r--drivers/iio/adc/axp288_adc.c42
-rw-r--r--drivers/iio/adc/ina2xx-adc.c2
-rw-r--r--drivers/iio/adc/stm32-adc-core.c10
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c3
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c29
-rw-r--r--drivers/iio/imu/adis16480.c2
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c4
-rw-r--r--drivers/iio/pressure/bmp280-core.c27
-rw-r--r--drivers/iio/pressure/bmp280.h5
-rw-r--r--drivers/iio/pressure/st_pressure_core.c2
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c82
-rw-r--r--drivers/infiniband/core/addr.c62
-rw-r--r--drivers/infiniband/core/device.c5
-rw-r--r--drivers/infiniband/core/umem_odp.c19
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c15
-rw-r--r--drivers/infiniband/core/uverbs_main.c5
-rw-r--r--drivers/infiniband/core/verbs.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c123
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_status.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c8
-rw-r--r--drivers/infiniband/hw/mlx5/main.c6
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c1
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c17
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c33
-rw-r--r--drivers/input/joystick/xpad.c24
-rw-r--r--drivers/input/misc/soc_button_array.c2
-rw-r--r--drivers/input/mouse/alps.c41
-rw-r--r--drivers/input/mouse/alps.h8
-rw-r--r--drivers/input/mouse/elan_i2c_core.c5
-rw-r--r--drivers/input/mouse/synaptics.c35
-rw-r--r--drivers/input/mouse/trackpoint.c7
-rw-r--r--drivers/input/mouse/trackpoint.h3
-rw-r--r--drivers/iommu/amd_iommu_types.h4
-rw-r--r--drivers/iommu/amd_iommu_v2.c8
-rw-r--r--drivers/iommu/arm-smmu.c7
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/iommu/intel-svm.c9
-rw-r--r--drivers/iommu/iommu-sysfs.c32
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c13
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.h4
-rw-r--r--drivers/irqchip/irq-atmel-aic.c14
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c4
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c40
-rw-r--r--drivers/irqchip/irq-gic-v3.c16
-rw-r--r--drivers/irqchip/irq-gic.c14
-rw-r--r--drivers/irqchip/irq-mips-gic.c5
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c28
-rw-r--r--drivers/isdn/mISDN/fsm.c5
-rw-r--r--drivers/isdn/mISDN/fsm.h2
-rw-r--r--drivers/isdn/mISDN/layer1.c3
-rw-r--r--drivers/isdn/mISDN/layer2.c15
-rw-r--r--drivers/isdn/mISDN/tei.c20
-rw-r--r--drivers/mailbox/pcc.c4
-rw-r--r--drivers/md/dm-mpath.c2
-rw-r--r--drivers/md/dm.c12
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/raid5-cache.c61
-rw-r--r--drivers/memory/atmel-ebi.c10
-rw-r--r--drivers/mfd/atmel-smc.c2
-rw-r--r--drivers/mfd/da9062-core.c6
-rw-r--r--drivers/misc/mei/pci-me.c6
-rw-r--r--drivers/misc/mei/pci-txe.c6
-rw-r--r--drivers/misc/mic/scif/scif_dma.c11
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c12
-rw-r--r--drivers/mmc/core/block.c57
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mmc/host/sdhci-xenon.c19
-rw-r--r--drivers/mtd/mtd_blkdevs.c1
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c15
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c21
-rw-r--r--drivers/mtd/nand/nand_base.c13
-rw-r--r--drivers/mtd/nand/nand_timings.c6
-rw-r--r--drivers/mtd/nand/nandsim.c1
-rw-r--r--drivers/mtd/nand/sunxi_nand.c4
-rw-r--r--drivers/net/bonding/bond_main.c13
-rw-r--r--drivers/net/dsa/bcm_sf2.c4
-rw-r--r--drivers/net/dsa/bcm_sf2.h1
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c8
-rw-r--r--drivers/net/dsa/mt7530.c38
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c33
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c92
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h3
-rw-r--r--drivers/net/ethernet/broadcom/b44.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c3
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c1
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c52
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c139
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c60
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c26
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c5
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c9
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c2
-rw-r--r--drivers/net/ethernet/ti/cpts.c111
-rw-r--r--drivers/net/ethernet/ti/cpts.h2
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c7
-rw-r--r--drivers/net/hyperv/rndis_filter.c14
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c2
-rw-r--r--drivers/net/macsec.c1
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/ppp/ppp_generic.c18
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c53
-rw-r--r--drivers/net/usb/asix_devices.c1
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/usb/lan78xx.c18
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vxlan.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c9
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c1
-rw-r--r--drivers/ntb/ntb_transport.c6
-rw-r--r--drivers/ntb/test/ntb_tool.c2
-rw-r--r--drivers/nvme/host/core.c35
-rw-r--r--drivers/nvme/host/fabrics.c3
-rw-r--r--drivers/nvme/host/pci.c45
-rw-r--r--drivers/nvme/host/rdma.c8
-rw-r--r--drivers/nvme/target/admin-cmd.c6
-rw-r--r--drivers/nvme/target/fc.c217
-rw-r--r--drivers/of/device.c8
-rw-r--r--drivers/parisc/dino.c2
-rw-r--r--drivers/pci/msi.c13
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci-driver.c4
-rw-r--r--drivers/pci/pci.c44
-rw-r--r--drivers/pci/probe.c43
-rw-r--r--drivers/pci/quirks.c94
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c25
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c2
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c11
-rw-r--r--drivers/ptp/ptp_clock.c42
-rw-r--r--drivers/ptp/ptp_private.h3
-rw-r--r--drivers/rtc/rtc-ds1307.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c4
-rw-r--r--drivers/scsi/Kconfig11
-rw-r--r--drivers/scsi/aacraid/aachba.c16
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c68
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c45
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c64
-rw-r--r--drivers/scsi/csiostor/csio_hw.c4
-rw-r--r--drivers/scsi/csiostor/csio_init.c12
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c3
-rw-r--r--drivers/scsi/ipr.c34
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/qedf/qedf.h3
-rw-r--r--drivers/scsi/qedf/qedf_els.c14
-rw-r--r--drivers/scsi/qedf/qedf_main.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c12
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c30
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/scsi/sd_zbc.c9
-rw-r--r--drivers/scsi/ses.c2
-rw-r--r--drivers/scsi/sg.c33
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/soc/imx/gpcv2.c15
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c3
-rw-r--r--drivers/soc/ti/ti_sci_pm_domains.c2
-rw-r--r--drivers/spi/spi.c32
-rw-r--r--drivers/staging/comedi/comedi_fops.c3
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c6
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c16
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c12
-rw-r--r--drivers/target/iscsi/iscsi_target.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c7
-rw-r--r--drivers/target/target_core_tpg.c4
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/target/target_core_user.c13
-rw-r--r--drivers/thunderbolt/eeprom.c9
-rw-r--r--drivers/thunderbolt/icm.c13
-rw-r--r--drivers/thunderbolt/tb.c4
-rw-r--r--drivers/tty/pty.c64
-rw-r--r--drivers/tty/serial/8250/8250_core.c23
-rw-r--r--drivers/tty/serial/amba-pl011.c37
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/usb/core/hcd.c4
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc3/gadget.c33
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c21
-rw-r--r--drivers/usb/host/pci-quirks.c37
-rw-r--r--drivers/usb/host/pci-quirks.h1
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/musb/musb_host.c1
-rw-r--r--drivers/usb/phy/phy-msm-usb.c17
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c5
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c9
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/storage/unusual_uas.h4
-rw-r--r--drivers/usb/storage/usb.c18
-rw-r--r--drivers/video/fbdev/efifb.c8
-rw-r--r--drivers/video/fbdev/imxfb.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c1
-rw-r--r--drivers/virtio/virtio_pci_common.c10
-rw-r--r--drivers/xen/Makefile3
-rw-r--r--drivers/xen/biomerge.c3
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c3
453 files changed, 5373 insertions, 2623 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b1aacfc62b1f..90265ab4437a 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -50,6 +50,7 @@ acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
+acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index f098e25b6b41..86c10599d9f8 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -670,7 +670,7 @@ err:
}
-void __init acpi_processor_check_duplicates(void)
+static void __init acpi_processor_check_duplicates(void)
{
/* check the correctness for all processors in ACPI namespace */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index b125bdd3d58b..1709551bc4aa 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -18,6 +18,7 @@ acpi-y := \
dsmthdat.o \
dsobject.o \
dsopcode.o \
+ dspkginit.o \
dsutils.o \
dswexec.o \
dswload.o \
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index bb6a84b0b4b3..7a1a68b5ac5c 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -114,6 +114,8 @@ ac_get_all_tables_from_file(char *filename,
u8 get_only_aml_tables,
struct acpi_new_table_desc **return_list_head);
+void ac_delete_table_list(struct acpi_new_table_desc *list_head);
+
u8 ac_is_file_binary(FILE * file);
acpi_status ac_validate_table_header(FILE * file, long table_offset);
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 0d95c85cce06..f8f3a6e74128 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -237,6 +237,11 @@ acpi_ds_initialize_objects(u32 table_index,
* dsobject - Parser/Interpreter interface - object initialization and conversion
*/
acpi_status
+acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ union acpi_operand_object **obj_desc_ptr);
+
+acpi_status
acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u32 buffer_length,
@@ -259,6 +264,14 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
/*
+ * dspkginit - Package object initialization
+ */
+acpi_status
+acpi_ds_init_package_element(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context);
+
+/*
* dsutils - Parser/Interpreter interface utility routines
*/
void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 8ddd3b20e0c6..0d45b8bb1678 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -199,6 +199,7 @@ struct acpi_namespace_node {
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
+#define IMPLICIT_EXTERNAL 0x02 /* iASL only: This object created implicitly via External */
#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
@@ -604,7 +605,7 @@ struct acpi_update_state {
* Pkg state - used to traverse nested package structures
*/
struct acpi_pkg_state {
- ACPI_STATE_COMMON u16 index;
+ ACPI_STATE_COMMON u32 index;
union acpi_operand_object *source_object;
union acpi_operand_object *dest_object;
struct acpi_walk_state *walk_state;
@@ -867,7 +868,7 @@ struct acpi_parse_obj_named {
/* This version is used by the iASL compiler only */
-#define ACPI_MAX_PARSEOP_NAME 20
+#define ACPI_MAX_PARSEOP_NAME 20
struct acpi_parse_obj_asl {
ACPI_PARSE_COMMON union acpi_parse_object *child;
@@ -907,7 +908,7 @@ union acpi_parse_object {
struct asl_comment_state {
u8 comment_type;
u32 spaces_before;
- union acpi_parse_object *latest_parse_node;
+ union acpi_parse_object *latest_parse_op;
union acpi_parse_object *parsing_paren_brace_node;
u8 capture_comments;
};
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 27c3f982d810..5226146190bf 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -122,7 +122,9 @@ struct acpi_object_integer {
_type *pointer; \
u32 length;
-struct acpi_object_string { /* Null terminated, ASCII characters only */
+/* Null terminated, ASCII characters only */
+
+struct acpi_object_string {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */
};
@@ -211,7 +213,9 @@ struct acpi_object_method {
union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\
union acpi_operand_object *handler; /* Handler for Address space */
-struct acpi_object_notify_common { /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
+/* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
+
+struct acpi_object_notify_common {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
struct acpi_object_device {
@@ -258,7 +262,9 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u8 access_length; /* For serial regions/fields */
-struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
+/* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
+
+struct acpi_object_field_common {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
};
@@ -333,11 +339,12 @@ struct acpi_object_addr_handler {
struct acpi_object_reference {
ACPI_OBJECT_COMMON_HEADER u8 class; /* Reference Class */
u8 target_type; /* Used for Index Op */
- u8 reserved;
+ u8 resolved; /* Reference has been resolved to a value */
void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
struct acpi_namespace_node *node; /* ref_of or Namepath */
union acpi_operand_object **where; /* Target of Index */
u8 *index_pointer; /* Used for Buffers and Strings */
+ u8 *aml; /* Used for deferred resolution of the ref */
u32 value; /* Used for Local/Arg/Index/ddb_handle */
};
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index c8da453bd960..84a3ceb6e384 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -76,7 +76,8 @@ void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc);
acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc);
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature);
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
+ char *signature, u32 *table_index);
u8 acpi_tb_is_table_loaded(u32 table_index);
@@ -132,6 +133,8 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
acpi_status acpi_tb_unload_table(u32 table_index);
+void acpi_tb_notify_table(u32 event, void *table);
+
void acpi_tb_terminate(void);
acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 2a3cc4296481..745134ade35f 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -516,7 +516,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
void *external_object,
- u16 index);
+ u32 index);
acpi_status
acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
@@ -538,6 +538,13 @@ acpi_status
acpi_ut_short_divide(u64 in_dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder);
+acpi_status
+acpi_ut_short_multiply(u64 in_multiplicand, u32 multiplier, u64 *outproduct);
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result);
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result);
+
/*
* utmisc
*/
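
The three prototypes added above front the utmath.c rework listed in the diffstat (222 lines, not excerpted here). They share one calling convention: operands in, the result returned through an out-pointer, and an acpi_status return code. A minimal sketch of a helper with that shape, assuming only routine parameter checks; illustrative only, not the actual utmath.c implementation:

acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
{
	/* Illustrative sketch, not the real utmath.c code */

	if (!out_result) {
		return (AE_BAD_PARAMETER);
	}

	/* Shifting a 64-bit value by 64 or more bits always yields zero */

	if (count >= 64) {
		*out_result = 0;
	} else {
		*out_result = operand << count;
	}

	return (AE_OK);
}
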
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 46bf270ac525..5a606eac0c22 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -310,7 +310,7 @@ dump_node:
}
else {
- acpi_os_printf("Object (%p) Pathname: %s\n",
+ acpi_os_printf("Object %p: Namespace Node - Pathname: %s\n",
node, (char *)ret_buf.pointer);
}
@@ -326,7 +326,7 @@ dump_node:
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
- acpi_os_printf("\nAttached Object (%p):\n", obj_desc);
+ acpi_os_printf("\nAttached Object %p:", obj_desc);
if (!acpi_os_readable
(obj_desc, sizeof(union acpi_operand_object))) {
acpi_os_printf
@@ -335,9 +335,36 @@ dump_node:
return;
}
- acpi_ut_debug_dump_buffer((void *)obj_desc,
- sizeof(union acpi_operand_object),
- display, ACPI_UINT32_MAX);
+ if (ACPI_GET_DESCRIPTOR_TYPE(((struct acpi_namespace_node *)
+ obj_desc)) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf(" Namespace Node - ");
+ status =
+ acpi_get_name((struct acpi_namespace_node *)
+ obj_desc,
+ ACPI_FULL_PATHNAME_NO_TRAILING,
+ &ret_buf);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf
+ ("Could not convert name to pathname\n");
+ } else {
+ acpi_os_printf("Pathname: %s",
+ (char *)ret_buf.pointer);
+ }
+
+ acpi_os_printf("\n");
+ acpi_ut_debug_dump_buffer((void *)obj_desc,
+ sizeof(struct
+ acpi_namespace_node),
+ display, ACPI_UINT32_MAX);
+ } else {
+ acpi_os_printf("\n");
+ acpi_ut_debug_dump_buffer((void *)obj_desc,
+ sizeof(union
+ acpi_operand_object),
+ display, ACPI_UINT32_MAX);
+ }
+
acpi_ex_dump_object_descriptor(obj_desc, 1);
}
}
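
The new branch above handles the case where the attached object is itself a namespace node rather than an operand object; the two structures differ in size, so dumping the full union over a node would print past its end. Reduced to the core decision, using a hypothetical helper name (ACPI_GET_DESCRIPTOR_TYPE and ACPI_DESC_TYPE_NAMED are the real macros used in the hunk):

static acpi_size acpi_db_attached_dump_length(void *obj_desc)
{
	/* Hypothetical helper, shown only to illustrate the dispatch */

	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
		return (sizeof(struct acpi_namespace_node));
	}

	return (sizeof(union acpi_operand_object));
}
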
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index c5dccc54307d..7bcf5f5ea029 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -184,6 +184,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
/* Execute flag should always be set when this function is entered */
if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+ ACPI_ERROR((AE_INFO, "Parse execute mode is not set"));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -556,6 +557,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
return_ACPI_STATUS(AE_OK);
}
+ ACPI_ERROR((AE_INFO, "Parse deferred mode is not set"));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 7df3152ed856..82448551781b 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -52,12 +52,6 @@
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
-/* Local prototypes */
-static acpi_status
-acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op,
- union acpi_operand_object **obj_desc_ptr);
-
#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
@@ -73,15 +67,13 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
* Simple objects are any objects other than a package object!
*
******************************************************************************/
-
-static acpi_status
+acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- acpi_object_type type;
ACPI_FUNCTION_TRACE(ds_build_internal_object);
@@ -89,140 +81,47 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/*
* This is a named object reference. If this name was
- * previously looked up in the namespace, it was stored in this op.
- * Otherwise, go ahead and look it up now
+ * previously looked up in the namespace, it was stored in
+ * this op. Otherwise, go ahead and look it up now
*/
if (!op->common.node) {
- status = acpi_ns_lookup(walk_state->scope_info,
- op->common.value.string,
- ACPI_TYPE_ANY,
- ACPI_IMODE_EXECUTE,
- ACPI_NS_SEARCH_PARENT |
- ACPI_NS_DONT_OPEN_SCOPE, NULL,
- ACPI_CAST_INDIRECT_PTR(struct
- acpi_namespace_node,
- &(op->
- common.
- node)));
- if (ACPI_FAILURE(status)) {
-
- /* Check if we are resolving a named reference within a package */
-
- if ((status == AE_NOT_FOUND)
- && (acpi_gbl_enable_interpreter_slack)
- &&
- ((op->common.parent->common.aml_opcode ==
- AML_PACKAGE_OP)
- || (op->common.parent->common.aml_opcode ==
- AML_VARIABLE_PACKAGE_OP))) {
- /*
- * We didn't find the target and we are populating elements
- * of a package - ignore if slack enabled. Some ASL code
- * contains dangling invalid references in packages and
- * expects that no exception will be issued. Leave the
- * element as a null element. It cannot be used, but it
- * can be overwritten by subsequent ASL code - this is
- * typically the case.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Ignoring unresolved reference in package [%4.4s]\n",
- walk_state->
- scope_info->scope.
- node->name.ascii));
-
- return_ACPI_STATUS(AE_OK);
- } else {
- ACPI_ERROR_NAMESPACE(op->common.value.
- string, status);
- }
-
- return_ACPI_STATUS(status);
- }
- }
-
- /* Special object resolution for elements of a package */
-
- if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (op->common.parent->common.aml_opcode ==
- AML_VARIABLE_PACKAGE_OP)) {
- /*
- * Attempt to resolve the node to a value before we insert it into
- * the package. If this is a reference to a common data type,
- * resolve it immediately. According to the ACPI spec, package
- * elements can only be "data objects" or method references.
- * Attempt to resolve to an Integer, Buffer, String or Package.
- * If cannot, return the named reference (for things like Devices,
- * Methods, etc.) Buffer Fields and Fields will resolve to simple
- * objects (int/buf/str/pkg).
- *
- * NOTE: References to things like Devices, Methods, Mutexes, etc.
- * will remain as named references. This behavior is not described
- * in the ACPI spec, but it appears to be an oversight.
- */
- obj_desc =
- ACPI_CAST_PTR(union acpi_operand_object,
- op->common.node);
-
- status =
- acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
- (struct
- acpi_namespace_node,
- &obj_desc),
- walk_state);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Special handling for Alias objects. We need to setup the type
- * and the Op->Common.Node to point to the Alias target. Note,
- * Alias has at most one level of indirection internally.
- */
- type = op->common.node->type;
- if (type == ACPI_TYPE_LOCAL_ALIAS) {
- type = obj_desc->common.type;
- op->common.node =
- ACPI_CAST_PTR(struct acpi_namespace_node,
- op->common.node->object);
- }
-
- switch (type) {
- /*
- * For these types, we need the actual node, not the subobject.
- * However, the subobject did not get an extra reference count above.
- *
- * TBD: should ex_resolve_node_to_value be changed to fix this?
- */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_THERMAL:
-
- acpi_ut_add_reference(op->common.node->object);
- /*lint -fallthrough */
- /*
- * For these types, we need the actual node, not the subobject.
- * The subobject got an extra reference count in ex_resolve_node_to_value.
- */
- case ACPI_TYPE_MUTEX:
- case ACPI_TYPE_METHOD:
- case ACPI_TYPE_POWER:
- case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_EVENT:
- case ACPI_TYPE_REGION:
-
- /* We will create a reference object for these types below */
- break;
+ /* Check if we are resolving a named reference within a package */
- default:
+ if ((op->common.parent->common.aml_opcode ==
+ AML_PACKAGE_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_VARIABLE_PACKAGE_OP)) {
/*
- * All other types - the node was resolved to an actual
- * object, we are done.
+ * We won't resolve package elements here, we will do this
+ * after all ACPI tables are loaded into the namespace. This
+ * behavior supports both forward references to named objects
+ * and external references to objects in other tables.
*/
- goto exit;
+ goto create_new_object;
+ } else {
+ status = acpi_ns_lookup(walk_state->scope_info,
+ op->common.value.string,
+ ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT |
+ ACPI_NS_DONT_OPEN_SCOPE,
+ NULL,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &(op->common.node)));
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(op->common.value.
+ string, status);
+ return_ACPI_STATUS(status);
+ }
}
}
}
+create_new_object:
+
/* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
@@ -240,7 +139,27 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
-exit:
+ /*
+ * Handling for unresolved package reference elements.
+ * These are elements that are namepaths.
+ */
+ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (op->common.parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
+ obj_desc->reference.resolved = TRUE;
+
+ if ((op->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
+ !obj_desc->reference.node) {
+ /*
+ * Name was unresolved above.
+ * Get the prefix node for later lookup
+ */
+ obj_desc->reference.node =
+ walk_state->scope_info->scope.node;
+ obj_desc->reference.aml = op->common.aml;
+ obj_desc->reference.resolved = FALSE;
+ }
+ }
+
*obj_desc_ptr = obj_desc;
return_ACPI_STATUS(status);
}
@@ -351,200 +270,6 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
/*******************************************************************************
*
- * FUNCTION: acpi_ds_build_internal_package_obj
- *
- * PARAMETERS: walk_state - Current walk state
- * op - Parser object to be translated
- * element_count - Number of elements in the package - this is
- * the num_elements argument to Package()
- * obj_desc_ptr - Where the ACPI internal object is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Translate a parser Op package object to the equivalent
- * namespace object
- *
- * NOTE: The number of elements in the package will be always be the num_elements
- * count, regardless of the number of elements in the package list. If
- * num_elements is smaller, only that many package list elements are used.
- * if num_elements is larger, the Package object is padded out with
- * objects of type Uninitialized (as per ACPI spec.)
- *
- * Even though the ASL compilers do not allow num_elements to be smaller
- * than the Package list length (for the fixed length package opcode), some
- * BIOS code modifies the AML on the fly to adjust the num_elements, and
- * this code compensates for that. This also provides compatibility with
- * other AML interpreters.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op,
- u32 element_count,
- union acpi_operand_object **obj_desc_ptr)
-{
- union acpi_parse_object *arg;
- union acpi_parse_object *parent;
- union acpi_operand_object *obj_desc = NULL;
- acpi_status status = AE_OK;
- u32 i;
- u16 index;
- u16 reference_count;
-
- ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
-
- /* Find the parent of a possibly nested package */
-
- parent = op->common.parent;
- while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
- parent = parent->common.parent;
- }
-
- /*
- * If we are evaluating a Named package object "Name (xxxx, Package)",
- * the package object already exists, otherwise it must be created.
- */
- obj_desc = *obj_desc_ptr;
- if (!obj_desc) {
- obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
- *obj_desc_ptr = obj_desc;
- if (!obj_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- obj_desc->package.node = parent->common.node;
- }
-
- /*
- * Allocate the element array (array of pointers to the individual
- * objects) based on the num_elements parameter. Add an extra pointer slot
- * so that the list is always null terminated.
- */
- obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
- element_count +
- 1) * sizeof(void *));
-
- if (!obj_desc->package.elements) {
- acpi_ut_delete_object_desc(obj_desc);
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- obj_desc->package.count = element_count;
-
- /*
- * Initialize the elements of the package, up to the num_elements count.
- * Package is automatically padded with uninitialized (NULL) elements
- * if num_elements is greater than the package list length. Likewise,
- * Package is truncated if num_elements is less than the list length.
- */
- arg = op->common.value.arg;
- arg = arg->common.next;
- for (i = 0; arg && (i < element_count); i++) {
- if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
- if (arg->common.node->type == ACPI_TYPE_METHOD) {
- /*
- * A method reference "looks" to the parser to be a method
- * invocation, so we special case it here
- */
- arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
- status =
- acpi_ds_build_internal_object(walk_state,
- arg,
- &obj_desc->
- package.
- elements[i]);
- } else {
- /* This package element is already built, just get it */
-
- obj_desc->package.elements[i] =
- ACPI_CAST_PTR(union acpi_operand_object,
- arg->common.node);
- }
- } else {
- status =
- acpi_ds_build_internal_object(walk_state, arg,
- &obj_desc->package.
- elements[i]);
- }
-
- if (*obj_desc_ptr) {
-
- /* Existing package, get existing reference count */
-
- reference_count =
- (*obj_desc_ptr)->common.reference_count;
- if (reference_count > 1) {
-
- /* Make new element ref count match original ref count */
-
- for (index = 0; index < (reference_count - 1);
- index++) {
- acpi_ut_add_reference((obj_desc->
- package.
- elements[i]));
- }
- }
- }
-
- arg = arg->common.next;
- }
-
- /* Check for match between num_elements and actual length of package_list */
-
- if (arg) {
- /*
- * num_elements was exhausted, but there are remaining elements in the
- * package_list. Truncate the package to num_elements.
- *
- * Note: technically, this is an error, from ACPI spec: "It is an error
- * for NumElements to be less than the number of elements in the
- * PackageList". However, we just print a message and
- * no exception is returned. This provides Windows compatibility. Some
- * BIOSs will alter the num_elements on the fly, creating this type
- * of ill-formed package object.
- */
- while (arg) {
- /*
- * We must delete any package elements that were created earlier
- * and are not going to be used because of the package truncation.
- */
- if (arg->common.node) {
- acpi_ut_remove_reference(ACPI_CAST_PTR
- (union
- acpi_operand_object,
- arg->common.node));
- arg->common.node = NULL;
- }
-
- /* Find out how many elements there really are */
-
- i++;
- arg = arg->common.next;
- }
-
- ACPI_INFO(("Actual Package length (%u) is larger than "
- "NumElements field (%u), truncated",
- i, element_count));
- } else if (i < element_count) {
- /*
- * Arg list (elements) was exhausted, but we did not reach num_elements count.
- * Note: this is not an error, the package is padded out with NULLs.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Package List length (%u) smaller than NumElements "
- "count (%u), padded with null elements\n",
- i, element_count));
- }
-
- obj_desc->package.flags |= AOPOBJ_DATA_VALID;
- op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ds_create_node
*
* PARAMETERS: walk_state - Current walk state
@@ -662,11 +387,20 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
case ACPI_TYPE_PACKAGE:
/*
- * Defer evaluation of Package term_arg operand
+ * Defer evaluation of Package term_arg operand and all
+ * package elements. (01/2017): We defer the element
+ * resolution to allow forward references from the package
+ * in order to provide compatibility with other ACPI
+ * implementations.
*/
obj_desc->package.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
+
+ if (!op->named.data) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
obj_desc->package.aml_start = op->named.data;
obj_desc->package.aml_length = op->named.length;
break;
@@ -818,9 +552,11 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;
- obj_desc->reference.object =
- op->common.node->object;
obj_desc->reference.class = ACPI_REFCLASS_NAME;
+ if (op->common.node) {
+ obj_desc->reference.object =
+ op->common.node->object;
+ }
break;
case AML_DEBUG_OP:
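
Taken together, the dsobject.c changes replace eager lookup of namepath package elements with deferred resolution: at package build time the element only records its prefix node and the raw AML namepath and is marked unresolved, and the lookup is performed later by the new dspkginit.c code after all tables are loaded, which is what allows forward references and references into other tables. A condensed sketch of the two phases, folded into one hypothetical helper and using the reference fields added in the acobject.h hunk above; illustrative only:

static acpi_status
acpi_ds_sketch_deferred_package_ref(struct acpi_walk_state *walk_state,
				    union acpi_parse_object *op,
				    union acpi_operand_object *element)
{
	union acpi_generic_state scope_info;
	struct acpi_namespace_node *resolved_node;
	acpi_status status;

	/* Phase 1, at package build time: record enough to resolve later */

	element->reference.class = ACPI_REFCLASS_NAME;
	element->reference.node = walk_state->scope_info->scope.node;	/* Prefix node */
	element->reference.aml = op->common.aml;	/* Raw namepath */
	element->reference.resolved = FALSE;

	/* Phase 2, after all tables are loaded: resolve the saved namepath */

	scope_info.scope.node = element->reference.node;
	status = acpi_ns_lookup(&scope_info, (char *)element->reference.aml,
				ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
				ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
				NULL, &resolved_node);
	if (ACPI_SUCCESS(status)) {
		element->reference.node = resolved_node;
		element->reference.resolved = TRUE;
	}

	return (status);
}
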
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index dfc3c25a083d..0336df7ac47d 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -599,6 +599,15 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
*/
walk_state->operand_index = walk_state->num_operands;
+ /* Ignore if child is not valid */
+
+ if (!op->common.value.arg) {
+ ACPI_ERROR((AE_INFO,
+ "Dispatch: Missing child while executing TermArg for %X",
+ op->common.aml_opcode));
+ return_ACPI_STATUS(AE_OK);
+ }
+
status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
new file mode 100644
index 000000000000..6d487edfe2de
--- /dev/null
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -0,0 +1,496 @@
+/******************************************************************************
+ *
+ * Module Name: dspkginit - Completion of deferred package initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2017, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("dspkginit")
+
+/* Local prototypes */
+static void
+acpi_ds_resolve_package_element(union acpi_operand_object **element);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_build_internal_package_obj
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * op - Parser object to be translated
+ * element_count - Number of elements in the package - this is
+ * the num_elements argument to Package()
+ * obj_desc_ptr - Where the ACPI internal object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Translate a parser Op package object to the equivalent
+ * namespace object
+ *
+ * NOTE: The number of elements in the package will be always be the num_elements
+ * count, regardless of the number of elements in the package list. If
+ * num_elements is smaller, only that many package list elements are used.
+ * if num_elements is larger, the Package object is padded out with
+ * objects of type Uninitialized (as per ACPI spec.)
+ *
+ * Even though the ASL compilers do not allow num_elements to be smaller
+ * than the Package list length (for the fixed length package opcode), some
+ * BIOS code modifies the AML on the fly to adjust the num_elements, and
+ * this code compensates for that. This also provides compatibility with
+ * other AML interpreters.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u32 element_count,
+ union acpi_operand_object **obj_desc_ptr)
+{
+ union acpi_parse_object *arg;
+ union acpi_parse_object *parent;
+ union acpi_operand_object *obj_desc = NULL;
+ acpi_status status = AE_OK;
+ u16 reference_count;
+ u32 index;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
+
+ /* Find the parent of a possibly nested package */
+
+ parent = op->common.parent;
+ while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
+ parent = parent->common.parent;
+ }
+
+ /*
+ * If we are evaluating a Named package object of the form:
+ * Name (xxxx, Package)
+ * the package object already exists, otherwise it must be created.
+ */
+ obj_desc = *obj_desc_ptr;
+ if (!obj_desc) {
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
+ *obj_desc_ptr = obj_desc;
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.node = parent->common.node;
+ }
+
+ if (obj_desc->package.flags & AOPOBJ_DATA_VALID) { /* Just in case */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Allocate the element array (array of pointers to the individual
+ * objects) based on the num_elements parameter. Add an extra pointer slot
+ * so that the list is always null terminated.
+ */
+ obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ element_count +
+ 1) * sizeof(void *));
+
+ if (!obj_desc->package.elements) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.count = element_count;
+ arg = op->common.value.arg;
+ arg = arg->common.next;
+
+ if (arg) {
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ }
+
+ /*
+ * Initialize the elements of the package, up to the num_elements count.
+ * Package is automatically padded with uninitialized (NULL) elements
+ * if num_elements is greater than the package list length. Likewise,
+ * Package is truncated if num_elements is less than the list length.
+ */
+ for (i = 0; arg && (i < element_count); i++) {
+ if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+ if (arg->common.node->type == ACPI_TYPE_METHOD) {
+ /*
+ * A method reference "looks" to the parser to be a method
+ * invocation, so we special case it here
+ */
+ arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
+ status =
+ acpi_ds_build_internal_object(walk_state,
+ arg,
+ &obj_desc->
+ package.
+ elements[i]);
+ } else {
+ /* This package element is already built, just get it */
+
+ obj_desc->package.elements[i] =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ arg->common.node);
+ }
+ } else {
+ status =
+ acpi_ds_build_internal_object(walk_state, arg,
+ &obj_desc->package.
+ elements[i]);
+ if (status == AE_NOT_FOUND) {
+ ACPI_ERROR((AE_INFO, "%-48s",
+ "****DS namepath not found"));
+ }
+
+ /*
+ * Initialize this package element. This function handles the
+ * resolution of named references within the package.
+ */
+ acpi_ds_init_package_element(0,
+ obj_desc->package.
+ elements[i], NULL,
+ &obj_desc->package.
+ elements[i]);
+ }
+
+ if (*obj_desc_ptr) {
+
+ /* Existing package, get existing reference count */
+
+ reference_count =
+ (*obj_desc_ptr)->common.reference_count;
+ if (reference_count > 1) {
+
+ /* Make new element ref count match original ref count */
+ /* TBD: Probably need an acpi_ut_add_references function */
+
+ for (index = 0;
+ index < ((u32)reference_count - 1);
+ index++) {
+ acpi_ut_add_reference((obj_desc->
+ package.
+ elements[i]));
+ }
+ }
+ }
+
+ arg = arg->common.next;
+ }
+
+ /* Check for match between num_elements and actual length of package_list */
+
+ if (arg) {
+ /*
+ * num_elements was exhausted, but there are remaining elements in
+ * the package_list. Truncate the package to num_elements.
+ *
+ * Note: technically, this is an error, from ACPI spec: "It is an
+ * error for NumElements to be less than the number of elements in
+ * the PackageList". However, we just print a message and no
+ * exception is returned. This provides compatibility with other
+ * ACPI implementations. Some firmware implementations will alter
+ * the num_elements on the fly, possibly creating this type of
+ * ill-formed package object.
+ */
+ while (arg) {
+ /*
+ * We must delete any package elements that were created earlier
+ * and are not going to be used because of the package truncation.
+ */
+ if (arg->common.node) {
+ acpi_ut_remove_reference(ACPI_CAST_PTR
+ (union
+ acpi_operand_object,
+ arg->common.node));
+ arg->common.node = NULL;
+ }
+
+ /* Find out how many elements there really are */
+
+ i++;
+ arg = arg->common.next;
+ }
+
+ ACPI_INFO(("Actual Package length (%u) is larger than "
+ "NumElements field (%u), truncated",
+ i, element_count));
+ } else if (i < element_count) {
+ /*
+ * Arg list (elements) was exhausted, but we did not reach
+ * num_elements count.
+ *
+ * Note: this is not an error, the package is padded out
+ * with NULLs.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Package List length (%u) smaller than NumElements "
+ "count (%u), padded with null elements\n",
+ i, element_count));
+ }
+
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_package_element
+ *
+ * PARAMETERS: acpi_pkg_callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Resolve a named reference element within a package object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_init_package_element(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context)
+{
+ union acpi_operand_object **element_ptr;
+
+ if (!source_object) {
+ return (AE_OK);
+ }
+
+ /*
+ * The following code is a bit of a hack to workaround a (current)
+ * limitation of the acpi_pkg_callback interface. We need a pointer
+ * to the location within the element array because a new object
+ * may be created and stored there.
+ */
+ if (context) {
+
+ /* A direct call was made to this function */
+
+ element_ptr = (union acpi_operand_object **)context;
+ } else {
+ /* Call came from acpi_ut_walk_package_tree */
+
+ element_ptr = state->pkg.this_target_obj;
+ }
+
+ /* We are only interested in reference objects/elements */
+
+ if (source_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
+
+ /* Attempt to resolve the (named) reference to a namespace node */
+
+ acpi_ds_resolve_package_element(element_ptr);
+ } else if (source_object->common.type == ACPI_TYPE_PACKAGE) {
+ source_object->package.flags |= AOPOBJ_DATA_VALID;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_resolve_package_element
+ *
+ * PARAMETERS: element_ptr - Pointer to a reference object
+ *
+ * RETURN:      A possible new element is stored at the indirect element_ptr
+ *
+ * DESCRIPTION: Resolve a package element that is a reference to a named
+ * object.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
+{
+ acpi_status status;
+ union acpi_generic_state scope_info;
+ union acpi_operand_object *element = *element_ptr;
+ struct acpi_namespace_node *resolved_node;
+ char *external_path = NULL;
+ acpi_object_type type;
+
+ ACPI_FUNCTION_TRACE(ds_resolve_package_element);
+
+ /* Check if reference element is already resolved */
+
+ if (element->reference.resolved) {
+ return_VOID;
+ }
+
+ /* Element must be a reference object of correct type */
+
+ scope_info.scope.node = element->reference.node; /* Prefix node */
+
+ status = acpi_ns_lookup(&scope_info, (char *)element->reference.aml, /* Pointer to AML path */
+ ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
+ NULL, &resolved_node);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
+ (char *)element->reference.
+ aml, NULL, &external_path);
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not find/resolve named package element: %s",
+ external_path));
+
+ ACPI_FREE(external_path);
+ *element_ptr = NULL;
+ return_VOID;
+ } else if (resolved_node->type == ACPI_TYPE_ANY) {
+
+ /* Named reference not resolved, return a NULL package element */
+
+ ACPI_ERROR((AE_INFO,
+ "Could not resolve named package element [%4.4s] in [%4.4s]",
+ resolved_node->name.ascii,
+ scope_info.scope.node->name.ascii));
+ *element_ptr = NULL;
+ return_VOID;
+ }
+#if 0
+ else if (resolved_node->flags & ANOBJ_TEMPORARY) {
+ /*
+ * A temporary node found here indicates that the reference is
+ * to a node that was created within this method. We are not
+ * going to allow it (especially if the package is returned
+ * from the method) -- the temporary node will be deleted out
+ * from under the method. (05/2017).
+ */
+ ACPI_ERROR((AE_INFO,
+ "Package element refers to a temporary name [%4.4s], "
+ "inserting a NULL element",
+ resolved_node->name.ascii));
+ *element_ptr = NULL;
+ return_VOID;
+ }
+#endif
+
+ /*
+ * Special handling for Alias objects. We need resolved_node to point
+ * to the Alias target. This effectively "resolves" the alias.
+ */
+ if (resolved_node->type == ACPI_TYPE_LOCAL_ALIAS) {
+ resolved_node = ACPI_CAST_PTR(struct acpi_namespace_node,
+ resolved_node->object);
+ }
+
+ /* Update the reference object */
+
+ element->reference.resolved = TRUE;
+ element->reference.node = resolved_node;
+ type = element->reference.node->type;
+
+ /*
+ * Attempt to resolve the node to a value before we insert it into
+ * the package. If this is a reference to a common data type,
+ * resolve it immediately. According to the ACPI spec, package
+ * elements can only be "data objects" or method references.
+ * Attempt to resolve to an Integer, Buffer, String or Package.
+	 * If we cannot, return the named reference (for things like Devices,
+	 * Methods, etc.). Buffer Fields and Fields will resolve to simple
+ * objects (int/buf/str/pkg).
+ *
+ * NOTE: References to things like Devices, Methods, Mutexes, etc.
+ * will remain as named references. This behavior is not described
+ * in the ACPI spec, but it appears to be an oversight.
+ */
+ status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+#if 0
+/* TBD - alias support */
+ /*
+ * Special handling for Alias objects. We need to setup the type
+ * and the Op->Common.Node to point to the Alias target. Note,
+ * Alias has at most one level of indirection internally.
+ */
+ type = op->common.node->type;
+ if (type == ACPI_TYPE_LOCAL_ALIAS) {
+ type = obj_desc->common.type;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node,
+ op->common.node->object);
+ }
+#endif
+
+ switch (type) {
+ /*
+ * These object types are a result of named references, so we will
+ * leave them as reference objects. In other words, these types
+ * have no intrinsic "value".
+ */
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_THERMAL:
+
+		/* TBD: This may not be necessary */
+
+ acpi_ut_add_reference(resolved_node->object);
+ break;
+
+ case ACPI_TYPE_MUTEX:
+ case ACPI_TYPE_METHOD:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_EVENT:
+ case ACPI_TYPE_REGION:
+
+ break;
+
+ default:
+ /*
+ * For all other types - the node was resolved to an actual
+ * operand object with a value, return the object
+ */
+ *element_ptr = (union acpi_operand_object *)resolved_node;
+ break;
+ }
+
+ return_VOID;
+}
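A minimal sketch of how the entry point above is expected to be driven, mirroring the deferred-resolution call added in the nsinit.c hunk later in this patch (obj_desc is assumed to be an already-parsed package object):

	status = acpi_ds_get_package_arguments(obj_desc);
	if (ACPI_SUCCESS(status)) {
		/* Walk the package tree; the callback resolves each named
		 * reference element in place via State->Pkg.ThisTargetObj */
		status = acpi_ut_walk_package_tree(obj_desc, NULL,
						   acpi_ds_init_package_element,
						   NULL);
	}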
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9c941947a063..3a3cb8624f41 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -440,9 +440,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
void *ignored)
{
acpi_status status;
+ acpi_event_status event_status;
struct acpi_gpe_event_info *gpe_event_info;
u32 gpe_enabled_count;
u32 gpe_index;
+ u32 gpe_number;
u32 i;
u32 j;
@@ -470,30 +472,40 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
+ gpe_number = gpe_block->block_base_number + gpe_index;
/*
* Ignore GPEs that have no corresponding _Lxx/_Exx method
- * and GPEs that are used to wake the system
+ * and GPEs that are used for wakeup
*/
- if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_NONE)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_HANDLER)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_RAW_HANDLER)
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+ ACPI_GPE_DISPATCH_METHOD)
|| (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
continue;
}
+ event_status = 0;
+ (void)acpi_hw_get_gpe_status(gpe_event_info,
+ &event_status);
+
status = acpi_ev_add_gpe_reference(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
- gpe_index +
- gpe_block->block_base_number));
+ gpe_number));
continue;
}
+ gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;
+
+ if (event_status & ACPI_EVENT_FLAG_STATUS_SET) {
+ ACPI_INFO(("GPE 0x%02X active on init",
+ gpe_number));
+ (void)acpi_ev_gpe_dispatch(gpe_block->node,
+ gpe_event_info,
+ gpe_number);
+ }
+
gpe_enabled_count++;
}
}
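In other words, if a GPE's status bit is already set by the time its block is initialized (for example, an event that triggered during early boot before the GPE could be enabled), the code above now reports it as "active on init" and dispatches it once, which avoids leaving an already-pending event unhandled.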
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 57718a3e029a..67c7c4ce276c 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -435,6 +435,14 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
*/
gpe_event_info->flags =
(ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
+ } else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) {
+ /*
+ * A reference to this GPE has been added during the GPE block
+ * initialization, so drop it now to prevent the GPE from being
+ * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag.
+ */
+ (void)acpi_ev_remove_gpe_reference(gpe_event_info);
+ gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED;
}
/*
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index d43d7da4c734..b8adb11f1b07 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -87,68 +87,40 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
target_node->object);
}
- /*
- * For objects that can never change (i.e., the NS node will
- * permanently point to the same object), we can simply attach
- * the object to the new NS node. For other objects (such as
- * Integers, buffers, etc.), we have to point the Alias node
- * to the original Node.
- */
- switch (target_node->type) {
+ /* Ensure that the target node is valid */
- /* For these types, the sub-object can change dynamically via a Store */
+ if (!target_node) {
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
- case ACPI_TYPE_INTEGER:
- case ACPI_TYPE_STRING:
- case ACPI_TYPE_BUFFER:
- case ACPI_TYPE_PACKAGE:
- case ACPI_TYPE_BUFFER_FIELD:
- /*
- * These types open a new scope, so we need the NS node in order to access
- * any children.
- */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_POWER:
- case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_THERMAL:
- case ACPI_TYPE_LOCAL_SCOPE:
- /*
- * The new alias has the type ALIAS and points to the original
- * NS node, not the object itself.
- */
- alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
- alias_node->object =
- ACPI_CAST_PTR(union acpi_operand_object, target_node);
- break;
+ /* Construct the alias object (a namespace node) */
+ switch (target_node->type) {
case ACPI_TYPE_METHOD:
/*
- * Control method aliases need to be differentiated
+ * Control method aliases need to be differentiated with
+ * a special type
*/
alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS;
- alias_node->object =
- ACPI_CAST_PTR(union acpi_operand_object, target_node);
break;
default:
-
- /* Attach the original source object to the new Alias Node */
-
/*
- * The new alias assumes the type of the target, and it points
- * to the same object. The reference count of the object has an
- * additional reference to prevent deletion out from under either the
- * target node or the alias Node
+ * All other object types.
+ *
+ * The new alias has the type ALIAS and points to the original
+ * NS node, not the object itself.
*/
- status = acpi_ns_attach_object(alias_node,
- acpi_ns_get_attached_object
- (target_node),
- target_node->type);
+ alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
break;
}
/* Since both operands are Nodes, we don't need to delete them */
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 44092f744477..83398dc4b7c2 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -102,7 +102,7 @@ static struct acpi_exdump_info acpi_ex_dump_package[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
- {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Element Count"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
{ACPI_EXD_PACKAGE, 0, NULL}
};
@@ -384,6 +384,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
count = info->offset;
while (count) {
+ if (!obj_desc) {
+ return;
+ }
+
target = ACPI_ADD_PTR(u8, obj_desc, info->offset);
name = info->name;
@@ -469,9 +473,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
start = *ACPI_CAST_PTR(void *, target);
next = start;
- acpi_os_printf("%20s : %p", name, next);
+ acpi_os_printf("%20s : %p ", name, next);
if (next) {
- acpi_os_printf("(%s %2.2X)",
+ acpi_os_printf("%s (Type %2.2X)",
acpi_ut_get_object_type_name
(next), next->common.type);
@@ -493,6 +497,8 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
break;
}
}
+ } else {
+ acpi_os_printf("- No attached objects");
}
acpi_os_printf("\n");
@@ -1129,7 +1135,9 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
default:
- acpi_os_printf("[Unknown Type] %X\n", obj_desc->common.type);
+ acpi_os_printf("[%s] Type: %2.2X\n",
+ acpi_ut_get_type_name(obj_desc->common.type),
+ obj_desc->common.type);
break;
}
}
@@ -1167,11 +1175,17 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
acpi_ex_dump_namespace_node((struct acpi_namespace_node *)
obj_desc, flags);
- acpi_os_printf("\nAttached Object (%p):\n",
- ((struct acpi_namespace_node *)obj_desc)->
- object);
-
obj_desc = ((struct acpi_namespace_node *)obj_desc)->object;
+ if (!obj_desc) {
+ return_VOID;
+ }
+
+ acpi_os_printf("\nAttached Object %p", obj_desc);
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf(" - Namespace Node");
+ }
+
+ acpi_os_printf(":\n");
goto dump_object;
}
@@ -1191,6 +1205,10 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
dump_object:
+ if (!obj_desc) {
+ return_VOID;
+ }
+
/* Common Fields */
acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index f222a80ca38e..1e7649ce0a7b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -265,6 +265,8 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid numeric logical opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
@@ -345,6 +347,9 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid object type for logical operator: %X",
+ operand0->common.type));
status = AE_AML_INTERNAL;
break;
}
@@ -388,6 +393,8 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid comparison opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
@@ -456,6 +463,8 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid comparison opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index eecb3bff7fd7..57980b7d3594 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -414,6 +414,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid object type: %X",
+ (operand[0])->common.type));
status = AE_AML_INTERNAL;
goto cleanup;
}
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index de74a4c25085..acb417b58bbb 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -107,7 +107,7 @@ acpi_hw_get_access_bit_width(u64 address,
ACPI_IS_ALIGNED(reg->bit_width, 8)) {
access_bit_width = reg->bit_width;
} else if (reg->access_width) {
- access_bit_width = (1 << (reg->access_width + 2));
+ access_bit_width = ACPI_ACCESS_BIT_WIDTH(reg->access_width);
} else {
access_bit_width =
ACPI_ROUND_UP_POWER_OF_TWO_8(reg->bit_offset +
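Assuming ACPI_ACCESS_BIT_WIDTH keeps the arithmetic of the open-coded expression it replaces, the macro expands to roughly:

	#define ACPI_ACCESS_BIT_WIDTH(size)	(1 << ((size) + 2))

so the GAS access_width encoding maps as 1 -> 8 bits, 2 -> 16, 3 -> 32, 4 -> 64.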
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 7ef13934968f..e5c095ca6083 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -72,13 +72,16 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_sleep) },
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_sleep)},
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake_prep) },
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_wake_prep)},
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake) }
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_wake)}
};
/*
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index e5f4fa496572..f2733f51ca8d 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -292,6 +292,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
{
acpi_status status;
char *path = pathname;
+ char *external_path;
struct acpi_namespace_node *prefix_node;
struct acpi_namespace_node *current_node = NULL;
struct acpi_namespace_node *this_node = NULL;
@@ -427,13 +428,22 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
num_carats++;
this_node = this_node->parent;
if (!this_node) {
+ /*
+ * Current scope has no parent scope. Externalize
+				 * the internal path for the error message.
+ */
+ status =
+ acpi_ns_externalize_name
+ (ACPI_UINT32_MAX, pathname, NULL,
+ &external_path);
+ if (ACPI_SUCCESS(status)) {
+ ACPI_ERROR((AE_INFO,
+ "%s: Path has too many parent prefixes (^)",
+ external_path));
+
+ ACPI_FREE(external_path);
+ }
- /* Current scope has no parent scope */
-
- ACPI_ERROR((AE_INFO,
- "%s: Path has too many parent prefixes (^) "
- "- reached beyond root node",
- pathname));
return_ACPI_STATUS(AE_NOT_FOUND);
}
}
@@ -634,6 +644,12 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node->object;
}
}
+#ifdef ACPI_ASL_COMPILER
+ if (!acpi_gbl_disasm_flag &&
+ (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+ this_node->flags |= IMPLICIT_EXTERNAL;
+ }
+#endif
}
/* Special handling for the last segment (num_segments == 0) */
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 9095d51f6b37..67b7370dcae5 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -69,9 +69,14 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info)
u8 user_arg_type;
u32 i;
- /* If not a predefined name, cannot typecheck args */
-
- if (!info->predefined) {
+ /*
+ * If not a predefined name, cannot typecheck args, because
+ * we have no idea what argument types are expected.
+	 * Also, ignore typecheck warnings/errors if this method
+ * has already been evaluated at least once -- in order
+ * to suppress repetitive messages.
+ */
+ if (!info->predefined || (info->node->flags & ANOBJ_EVALUATED)) {
return;
}
@@ -93,6 +98,10 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info)
acpi_ut_get_type_name
(user_arg_type),
acpi_ut_get_type_name(arg_type)));
+
+ /* Prevent any additional typechecking for this method */
+
+ info->node->flags |= ANOBJ_EVALUATED;
}
}
}
@@ -121,7 +130,7 @@ acpi_ns_check_acpi_compliance(char *pathname,
u32 aml_param_count;
u32 required_param_count;
- if (!predefined) {
+ if (!predefined || (node->flags & ANOBJ_EVALUATED)) {
return;
}
@@ -215,6 +224,10 @@ acpi_ns_check_argument_count(char *pathname,
u32 aml_param_count;
u32 required_param_count;
+ if (node->flags & ANOBJ_EVALUATED) {
+ return;
+ }
+
if (!predefined) {
/*
* Not a predefined name. Check the incoming user argument count
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index ce33e7297ea7..9c6297949712 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -396,6 +396,20 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
info->package_init++;
status = acpi_ds_get_package_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+
+ /*
+ * Resolve all named references in package objects (and all
+ * sub-packages). This action has been deferred until the entire
+ * namespace has been loaded, in order to support external and
+ * forward references from individual package elements (05/2017).
+ */
+ status = acpi_ut_walk_package_tree(obj_desc, NULL,
+ acpi_ds_init_package_element,
+ NULL);
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
break;
default:
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index aa16aeaa8937..a410760a0308 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -89,7 +89,14 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
{
acpi_size size;
- ACPI_FUNCTION_ENTRY();
+ /* Validate the Node */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/cached reference target node: %p, descriptor type %d",
+ node, ACPI_GET_DESCRIPTOR_TYPE(node)));
+ return (0);
+ }
size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE);
return (size);
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 4954cb6c9090..a8ea8fb1d299 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -614,6 +614,8 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
default: /* Should not get here, type was validated by caller */
+ ACPI_ERROR((AE_INFO, "Invalid Package type: %X",
+ package->ret_info.type));
return (AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 538c61677c10..783f4c838aee 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
- status = acpi_get_handle(handle, pathname, &target_handle);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (pathname) {
+ status = acpi_get_handle(handle, pathname, &target_handle);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index b4224005783c..bb04dec168ad 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -164,6 +164,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
INCREMENT_ARG_LIST(walk_state->arg_types);
}
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Final argument count: %u pass %u\n",
+ walk_state->arg_count,
+ walk_state->pass_number));
+
/*
* Handle executable code at "module-level". This refers to
* executable opcodes that appear outside of any control method.
@@ -277,6 +282,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
AML_NAME_OP)
&& (walk_state->pass_number <=
ACPI_IMODE_LOAD_PASS2)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Setup Package/Buffer: Pass %u, AML Ptr: %p\n",
+ walk_state->pass_number,
+ aml_op_start));
+
/*
* Skip parsing of Buffers and Packages because we don't have
* enough info in the first pass to parse them correctly.
@@ -570,6 +580,10 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Check for arguments that need to be processed */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Parseloop: argument count: %u\n",
+ walk_state->arg_count));
+
if (walk_state->arg_count) {
/*
* There are arguments (complex ones), push Op and
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index ef6384e374fc..0bef6df71bba 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -359,6 +359,32 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
acpi_ps_build_named_op(walk_state, aml_op_start, op,
&named_op);
acpi_ps_free_op(op);
+
+#ifdef ACPI_ASL_COMPILER
+ if (acpi_gbl_disasm_flag
+ && walk_state->opcode == AML_EXTERNAL_OP
+ && status == AE_NOT_FOUND) {
+ /*
+ * If parsing of AML_EXTERNAL_OP's name path fails, then skip
+ * past this opcode and keep parsing. This is a much better
+ * alternative than to abort the entire disassembler. At this
+ * point, the parser_state is at the end of the namepath of the
+ * external declaration opcode. Setting walk_state->Aml to
+			 * walk_state->parser_state.Aml + 2 increments the
+ * walk_state->Aml past the object type and the paramcount of the
+ * external opcode. For the error message, only print the AML
+ * offset. We could attempt to print the name but this may cause
+ * a segmentation fault when printing the namepath because the
+ * AML may be incorrect.
+ */
+ acpi_os_printf
+ ("// Invalid external declaration at AML offset 0x%x.\n",
+ walk_state->aml -
+ walk_state->parser_state.aml_start);
+ walk_state->aml = walk_state->parser_state.aml + 2;
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+#endif
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 59a4f9ed06a7..be65e65e216e 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -615,7 +615,7 @@ ACPI_EXPORT_SYMBOL(acpi_walk_resource_buffer)
* device we are querying
* name - Method name of the resources we want.
* (METHOD_NAME__CRS, METHOD_NAME__PRS, or
- * METHOD_NAME__AEI)
+ * METHOD_NAME__AEI or METHOD_NAME__DMA)
* user_function - Called for each resource
* context - Passed to user_function
*
@@ -641,11 +641,12 @@ acpi_walk_resources(acpi_handle device_handle,
if (!device_handle || !user_function || !name ||
(!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
!ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__DMA))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS/_PRS/_AEI resource list */
+ /* Get the _CRS/_PRS/_AEI/_DMA resource list */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_get_method_data(device_handle, name, &buffer);
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index c9d6fa6d7cc6..b19a2f0ea331 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -50,6 +50,57 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbdata")
+/* Local prototypes */
+static acpi_status
+acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index);
+
+static u8
+acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_compare_tables
+ *
+ * PARAMETERS: table_desc - Table 1 descriptor to be compared
+ * table_index - Index of table 2 to be compared
+ *
+ * RETURN: TRUE if both tables are identical.
+ *
+ * DESCRIPTION: This function compares a table with another table that has
+ * already been installed in the root table list.
+ *
+ ******************************************************************************/
+
+static u8
+acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
+{
+ acpi_status status = AE_OK;
+ u8 is_identical;
+ struct acpi_table_header *table;
+ u32 table_length;
+ u8 table_flags;
+
+ status =
+ acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index],
+ &table, &table_length, &table_flags);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ is_identical = (u8)((table_desc->length != table_length ||
+ memcmp(table_desc->pointer, table, table_length)) ?
+ FALSE : TRUE);
+
+ /* Release the acquired table */
+
+ acpi_tb_release_table(table, table_length, table_flags);
+ return (is_identical);
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_tb_init_table_descriptor
@@ -64,6 +115,7 @@ ACPI_MODULE_NAME("tbdata")
* DESCRIPTION: Initialize a new table descriptor
*
******************************************************************************/
+
void
acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
acpi_physical_address address,
@@ -338,7 +390,7 @@ void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc)
acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
{
- if (!table_desc->pointer && !acpi_gbl_verify_table_checksum) {
+ if (!table_desc->pointer && !acpi_gbl_enable_table_validation) {
/*
* Only validates the header of the table.
* Note that Length contains the size of the mapping after invoking
@@ -354,22 +406,100 @@ acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
return (acpi_tb_validate_table(table_desc));
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_check_duplication
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ * table_index - Where the table index is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Avoid installing duplicated tables. However, table override and
+ *              user-aided dynamic table loads are allowed, so comparing the
+ *              address of the table is not sufficient; the entire table
+ *              content must be checked.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(tb_check_duplication);
+
+ /* Check if table is already registered */
+
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+
+ /* Do not compare with unverified tables */
+
+ if (!
+ (acpi_gbl_root_table_list.tables[i].
+ flags & ACPI_TABLE_IS_VERIFIED)) {
+ continue;
+ }
+
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ if (!acpi_tb_compare_tables(table_desc, i)) {
+ continue;
+ }
+
+ /*
+ * Note: the current mechanism does not unregister a table if it is
+ * dynamically unloaded. The related namespace entries are deleted,
+ * but the table remains in the root table list.
+ *
+ * The assumption here is that the number of different tables that
+ * will be loaded is actually small, and there is minimal overhead
+ * in just keeping the table in case it is needed again.
+ *
+ * If this assumption changes in the future (perhaps on large
+ * machines with many table load/unload operations), tables will
+ * need to be unregistered when they are unloaded, and slots in the
+ * root table list should be reused when empty.
+ */
+ if (acpi_gbl_root_table_list.tables[i].flags &
+ ACPI_TABLE_IS_LOADED) {
+
+ /* Table is still loaded, this is an error */
+
+ return_ACPI_STATUS(AE_ALREADY_EXISTS);
+ } else {
+ *table_index = i;
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
+ }
+ }
+
+ /* Indicate no duplication to the caller */
+
+ return_ACPI_STATUS(AE_OK);
+}
+
/******************************************************************************
*
* FUNCTION: acpi_tb_verify_temp_table
*
* PARAMETERS: table_desc - Table descriptor
* signature - Table signature to verify
+ * table_index - Where the table index is returned
*
* RETURN: Status
*
* DESCRIPTION: This function is called to validate and verify the table, the
* returned table descriptor is in "VALIDATED" state.
+ *              Note that 'TableIndex' must be non-NULL in order to
+ *              enable the duplication check.
*
*****************************************************************************/
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
+ char *signature, u32 *table_index)
{
acpi_status status = AE_OK;
@@ -392,9 +522,10 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
goto invalidate_and_exit;
}
- /* Verify the checksum */
+ if (acpi_gbl_enable_table_validation) {
+
+ /* Verify the checksum */
- if (acpi_gbl_verify_table_checksum) {
status =
acpi_tb_verify_checksum(table_desc->pointer,
table_desc->length);
@@ -411,9 +542,34 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
goto invalidate_and_exit;
}
+
+ /* Avoid duplications */
+
+ if (table_index) {
+ status =
+ acpi_tb_check_duplication(table_desc, table_index);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_CTRL_TERMINATE) {
+ ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+ "%4.4s 0x%8.8X%8.8X"
+ " Table is duplicated",
+ acpi_ut_valid_nameseg
+ (table_desc->signature.
+ ascii) ? table_desc->
+ signature.
+ ascii : "????",
+ ACPI_FORMAT_UINT64
+ (table_desc->address)));
+ }
+
+ goto invalidate_and_exit;
+ }
+ }
+
+ table_desc->flags |= ACPI_TABLE_IS_VERIFIED;
}
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
invalidate_and_exit:
acpi_tb_invalidate_table(table_desc);
@@ -436,6 +592,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
{
struct acpi_table_desc *tables;
u32 table_count;
+ u32 current_table_count, max_table_count;
+ u32 i;
ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
@@ -455,8 +613,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
table_count = acpi_gbl_root_table_list.current_table_count;
}
- tables = ACPI_ALLOCATE_ZEROED(((acpi_size)table_count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+ max_table_count = table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ tables = ACPI_ALLOCATE_ZEROED(((acpi_size)max_table_count) *
sizeof(struct acpi_table_desc));
if (!tables) {
ACPI_ERROR((AE_INFO,
@@ -466,9 +624,16 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Copy and free the previous table array */
+ current_table_count = 0;
if (acpi_gbl_root_table_list.tables) {
- memcpy(tables, acpi_gbl_root_table_list.tables,
- (acpi_size)table_count * sizeof(struct acpi_table_desc));
+ for (i = 0; i < table_count; i++) {
+ if (acpi_gbl_root_table_list.tables[i].address) {
+ memcpy(tables + current_table_count,
+ acpi_gbl_root_table_list.tables + i,
+ sizeof(struct acpi_table_desc));
+ current_table_count++;
+ }
+ }
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -476,8 +641,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.max_table_count =
- table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.max_table_count = max_table_count;
+ acpi_gbl_root_table_list.current_table_count = current_table_count;
acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
@@ -818,13 +983,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
acpi_ev_update_gpes(owner_id);
}
- /* Invoke table handler if present */
-
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
- acpi_gbl_table_handler_context);
- }
+ /* Invoke table handler */
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_LOAD, table);
return_ACPI_STATUS(status);
}
@@ -894,15 +1055,11 @@ acpi_status acpi_tb_unload_table(u32 table_index)
return_ACPI_STATUS(AE_NOT_EXIST);
}
- /* Invoke table handler if present */
+ /* Invoke table handler */
- if (acpi_gbl_table_handler) {
- status = acpi_get_table_by_index(table_index, &table);
- if (ACPI_SUCCESS(status)) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
- table,
- acpi_gbl_table_handler_context);
- }
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_SUCCESS(status)) {
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_UNLOAD, table);
}
/* Delete the portion of the namespace owned by this table */
@@ -918,3 +1075,26 @@ acpi_status acpi_tb_unload_table(u32 table_index)
}
ACPI_EXPORT_SYMBOL(acpi_tb_unload_table)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_notify_table
+ *
+ * PARAMETERS: event - Table event
+ * table - Validated table pointer
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Notify the registered table handler of a table event.
+ *
+ ******************************************************************************/
+
+void acpi_tb_notify_table(u32 event, void *table)
+{
+ /* Invoke table handler if present */
+
+ if (acpi_gbl_table_handler) {
+ (void)acpi_gbl_table_handler(event, table,
+ acpi_gbl_table_handler_context);
+ }
+}
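The core of the duplication test above is the "entire length, not just the header" comparison. A self-contained sketch of that rule (the function name and parameters here are illustrative, not ACPICA API):

	#include <string.h>

	/* Tables count as identical only if both length and content match */
	static int tables_identical(const void *a, unsigned int len_a,
				    const void *b, unsigned int len_b)
	{
		return (len_a == len_b) && (memcmp(a, b, len_a) == 0);
	}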
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 4620f3c68c13..0dfc0ac3c141 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -48,54 +48,6 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbinstal")
-/* Local prototypes */
-static u8
-acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_tb_compare_tables
- *
- * PARAMETERS: table_desc - Table 1 descriptor to be compared
- * table_index - Index of table 2 to be compared
- *
- * RETURN: TRUE if both tables are identical.
- *
- * DESCRIPTION: This function compares a table with another table that has
- * already been installed in the root table list.
- *
- ******************************************************************************/
-
-static u8
-acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
-{
- acpi_status status = AE_OK;
- u8 is_identical;
- struct acpi_table_header *table;
- u32 table_length;
- u8 table_flags;
-
- status =
- acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index],
- &table, &table_length, &table_flags);
- if (ACPI_FAILURE(status)) {
- return (FALSE);
- }
-
- /*
- * Check for a table match on the entire table length,
- * not just the header.
- */
- is_identical = (u8)((table_desc->length != table_length ||
- memcmp(table_desc->pointer, table, table_length)) ?
- FALSE : TRUE);
-
- /* Release the acquired table */
-
- acpi_tb_release_table(table, table_length, table_flags);
- return (is_identical);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_tb_install_table_with_override
@@ -112,7 +64,6 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
* table array.
*
******************************************************************************/
-
void
acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
u8 override, u32 *table_index)
@@ -210,95 +161,29 @@ acpi_tb_install_standard_table(acpi_physical_address address,
goto release_and_exit;
}
- /* Validate and verify a table before installation */
-
- status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
- if (ACPI_FAILURE(status)) {
- goto release_and_exit;
- }
-
/* Acquire the table lock */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (reload) {
- /*
- * Validate the incoming table signature.
- *
- * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
- * 2) We added support for OEMx tables, signature "OEM".
- * 3) Valid tables were encountered with a null signature, so we just
- * gave up on validating the signature, (05/2008).
- * 4) We encountered non-AML tables such as the MADT, which caused
- * interpreter errors and kernel faults. So now, we once again allow
- * only "SSDT", "OEMx", and now, also a null signature. (05/2011).
- */
- if ((new_table_desc.signature.ascii[0] != 0x00) &&
- (!ACPI_COMPARE_NAME
- (&new_table_desc.signature, ACPI_SIG_SSDT))
- && (strncmp(new_table_desc.signature.ascii, "OEM", 3))) {
- ACPI_BIOS_ERROR((AE_INFO,
- "Table has invalid signature [%4.4s] (0x%8.8X), "
- "must be SSDT or OEMx",
- acpi_ut_valid_nameseg(new_table_desc.
- signature.
- ascii) ?
- new_table_desc.signature.
- ascii : "????",
- new_table_desc.signature.integer));
-
- status = AE_BAD_SIGNATURE;
- goto unlock_and_exit;
- }
-
- /* Check if table is already registered */
-
- for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
- ++i) {
- /*
- * Check for a table match on the entire table length,
- * not just the header.
- */
- if (!acpi_tb_compare_tables(&new_table_desc, i)) {
- continue;
- }
+ /* Validate and verify a table before installation */
+ status = acpi_tb_verify_temp_table(&new_table_desc, NULL, &i);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_TERMINATE) {
/*
- * Note: the current mechanism does not unregister a table if it is
- * dynamically unloaded. The related namespace entries are deleted,
- * but the table remains in the root table list.
- *
- * The assumption here is that the number of different tables that
- * will be loaded is actually small, and there is minimal overhead
- * in just keeping the table in case it is needed again.
- *
- * If this assumption changes in the future (perhaps on large
- * machines with many table load/unload operations), tables will
- * need to be unregistered when they are unloaded, and slots in the
- * root table list should be reused when empty.
+ * Table was unloaded, allow it to be reloaded.
+ * As we are going to return AE_OK to the caller, we should
+ * take the responsibility of freeing the input descriptor.
+ * Refill the input descriptor to ensure
+ * acpi_tb_install_table_with_override() can be called again to
+ * indicate the re-installation.
*/
- if (acpi_gbl_root_table_list.tables[i].flags &
- ACPI_TABLE_IS_LOADED) {
-
- /* Table is still loaded, this is an error */
-
- status = AE_ALREADY_EXISTS;
- goto unlock_and_exit;
- } else {
- /*
- * Table was unloaded, allow it to be reloaded.
- * As we are going to return AE_OK to the caller, we should
- * take the responsibility of freeing the input descriptor.
- * Refill the input descriptor to ensure
- * acpi_tb_install_table_with_override() can be called again to
- * indicate the re-installation.
- */
- acpi_tb_uninstall_table(&new_table_desc);
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- *table_index = i;
- return_ACPI_STATUS(AE_OK);
- }
+ acpi_tb_uninstall_table(&new_table_desc);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ *table_index = i;
+ return_ACPI_STATUS(AE_OK);
}
+ goto unlock_and_exit;
}
/* Add the table to the global root table list */
@@ -306,14 +191,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
acpi_tb_install_table_with_override(&new_table_desc, override,
table_index);
- /* Invoke table handler if present */
+ /* Invoke table handler */
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
- new_table_desc.pointer,
- acpi_gbl_table_handler_context);
- }
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_INSTALL, new_table_desc.pointer);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
unlock_and_exit:
@@ -382,9 +263,11 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
finish_override:
- /* Validate and verify a table before overriding */
-
- status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
+ /*
+	 * Validate and verify a table before overriding. Skip the nested
+	 * table duplication check; it is too complicated and unnecessary here.
+ */
+ status = acpi_tb_verify_temp_table(&new_table_desc, NULL, NULL);
if (ACPI_FAILURE(status)) {
return;
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 010b1c43df92..26ad596c973e 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -167,7 +167,8 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
{
acpi_status status;
- u32 i;
+ struct acpi_table_desc *table_desc;
+ u32 i, j;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -179,6 +180,8 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
/*
* Ensure OS early boot logic, which is required by some hosts. If the
* table state is reported to be wrong, developers should fix the
@@ -186,17 +189,39 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
* early stage.
*/
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
- if (acpi_gbl_root_table_list.tables[i].pointer) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+ if (table_desc->pointer) {
ACPI_ERROR((AE_INFO,
"Table [%4.4s] is not invalidated during early boot stage",
- acpi_gbl_root_table_list.tables[i].
- signature.ascii));
+ table_desc->signature.ascii));
}
}
- acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
+ if (!acpi_gbl_enable_table_validation) {
+ /*
+ * Now it's safe to do full table validation. We can do deferred
+		 * table initialization here once the flag is set.
+ */
+ acpi_gbl_enable_table_validation = TRUE;
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
+ ++i) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+ if (!(table_desc->flags & ACPI_TABLE_IS_VERIFIED)) {
+ status =
+ acpi_tb_verify_temp_table(table_desc, NULL,
+ &j);
+ if (ACPI_FAILURE(status)) {
+ acpi_tb_uninstall_table(table_desc);
+ }
+ }
+ }
+ }
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
status = acpi_tb_resize_root_table_list();
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -369,6 +394,10 @@ void acpi_put_table(struct acpi_table_header *table)
ACPI_FUNCTION_TRACE(acpi_put_table);
+ if (!table) {
+ return_VOID;
+ }
+
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/* Walk the root table list */
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index b71ce3b817ea..d81f442228b8 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -206,7 +206,7 @@ acpi_status acpi_tb_load_namespace(void)
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
table = &acpi_gbl_root_table_list.tables[i];
- if (!acpi_gbl_root_table_list.tables[i].address ||
+ if (!table->address ||
(!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
&& !ACPI_COMPARE_NAME(table->signature.ascii,
ACPI_SIG_PSDT)
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 6600bc257516..fb406daf47fa 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -69,8 +69,10 @@ static const char acpi_gbl_hex_to_ascii[] = {
char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
{
+ u64 index;
- return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
+ acpi_ut_short_shift_right(integer, position, &index);
+ return (acpi_gbl_hex_to_ascii[index & 0xF]);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index aa0502d1d019..5f9c680076c4 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -47,15 +47,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmath")
-/*
- * Optional support for 64-bit double-precision integer divide. This code
- * is configurable and is implemented in order to support 32-bit kernel
- * environments where a 64-bit double-precision math library is not available.
- *
- * Support for a more normal 64-bit divide/modulo (with check for a divide-
- * by-zero) appears after this optional section of code.
- */
-#ifndef ACPI_USE_NATIVE_DIVIDE
/* Structures used only for 64-bit divide */
typedef struct uint64_struct {
u32 lo;
@@ -69,6 +60,217 @@ typedef union uint64_overlay {
} uint64_overlay;
+/*
+ * Optional support for 64-bit double-precision integer multiply and shift.
+ * This code is configurable and is implemented in order to support 32-bit
+ * kernel environments where a 64-bit double-precision math library is not
+ * available.
+ */
+#ifndef ACPI_USE_NATIVE_MATH64
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_multiply
+ *
+ * PARAMETERS: multiplicand - 64-bit multiplicand
+ * multiplier - 32-bit multiplier
+ * out_product - Pointer to where the product is returned
+ *
+ * DESCRIPTION: Perform a short multiply.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
+{
+ union uint64_overlay multiplicand_ovl;
+ union uint64_overlay product;
+ u32 carry32;
+
+ ACPI_FUNCTION_TRACE(ut_short_multiply);
+
+ multiplicand_ovl.full = multiplicand;
+
+ /*
+ * The Product is 64 bits, the carry is always 32 bits,
+ * and is generated by the second multiply.
+ */
+ ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.hi, multiplier,
+ product.part.hi, carry32);
+
+ ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.lo, multiplier,
+ product.part.lo, carry32);
+
+ product.part.hi += carry32;
+
+ /* Return only what was requested */
+
+ if (out_product) {
+ *out_product = product.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_left
+ *
+ * PARAMETERS: operand - 64-bit shift operand
+ * count - 32-bit shift count
+ * out_result - Pointer to where the result is returned
+ *
+ * DESCRIPTION: Perform a short left shift.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
+{
+ union uint64_overlay operand_ovl;
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_left);
+
+ operand_ovl.full = operand;
+
+ if ((count & 63) >= 32) {
+ operand_ovl.part.hi = operand_ovl.part.lo;
+ operand_ovl.part.lo ^= operand_ovl.part.lo;
+ count = (count & 63) - 32;
+ }
+ ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
+ operand_ovl.part.lo, count);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand_ovl.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_right
+ *
+ * PARAMETERS: operand - 64-bit shift operand
+ * count - 32-bit shift count
+ * out_result - Pointer to where the result is returned
+ *
+ * DESCRIPTION: Perform a short right shift.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
+{
+ union uint64_overlay operand_ovl;
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_right);
+
+ operand_ovl.full = operand;
+
+ if ((count & 63) >= 32) {
+ operand_ovl.part.lo = operand_ovl.part.hi;
+ operand_ovl.part.hi ^= operand_ovl.part.hi;
+ count = (count & 63) - 32;
+ }
+ ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
+ operand_ovl.part.lo, count);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand_ovl.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+#else
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_multiply
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_multiply function.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_multiply);
+
+ /* Return only what was requested */
+
+ if (out_product) {
+ *out_product = multiplicand * multiplier;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_left
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_shift_left function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_left);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand << count;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_right
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_shift_right function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_right);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand >> count;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+#endif
+
+/*
+ * Optional support for 64-bit double-precision integer divide. This code
+ * is configurable and is implemented in order to support 32-bit kernel
+ * environments where a 64-bit double-precision math library is not available.
+ *
+ * Support for a more normal 64-bit divide/modulo (with check for a divide-
+ * by-zero) appears after this optional section of code.
+ */
+#ifndef ACPI_USE_NATIVE_DIVIDE
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide
@@ -258,6 +460,7 @@ acpi_ut_divide(u64 in_dividend,
}
#else
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide, acpi_ut_divide
@@ -272,6 +475,7 @@ acpi_ut_divide(u64 in_dividend,
* perform the divide.
*
******************************************************************************/
+
acpi_status
acpi_ut_short_divide(u64 in_dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder)
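As a standalone check of the "count >= 32" split used by the non-native shift helpers added above (the value and count are arbitrary test inputs):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long value = 0x1122334455667788ULL;
		unsigned int hi = (unsigned int)(value >> 32);
		unsigned int lo = (unsigned int)value;
		unsigned int count = 36;

		if (count >= 32) {	/* move the halves, reduce the count */
			lo = hi;
			hi = 0;
			count -= 32;
		}
		if (count) {		/* shift the 64-bit pair by the remainder */
			lo = (lo >> count) | (hi << (32 - count));
			hi >>= count;
		}

		printf("0x%08X%08X\n", hi, lo);	/* prints 0x0000000001122334 */
		return 0;
	}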
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 443ffad01209..45c78c2adbf0 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -224,7 +224,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
*
* RETURN: Status
*
- * DESCRIPTION: Walk through a package
+ * DESCRIPTION: Walk through a package, including subpackages
*
******************************************************************************/
@@ -236,8 +236,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
acpi_status status = AE_OK;
union acpi_generic_state *state_list = NULL;
union acpi_generic_state *state;
- u32 this_index;
union acpi_operand_object *this_source_obj;
+ u32 this_index;
ACPI_FUNCTION_TRACE(ut_walk_package_tree);
@@ -251,8 +251,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
/* Get one element of the package */
this_index = state->pkg.index;
- this_source_obj = (union acpi_operand_object *)
+ this_source_obj =
state->pkg.source_object->package.elements[this_index];
+ state->pkg.this_target_obj =
+ &state->pkg.source_object->package.elements[this_index];
/*
* Check for:
@@ -339,6 +341,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
/* We should never get here */
+ ACPI_ERROR((AE_INFO, "State list did not terminate correctly"));
+
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 64e6641bfe82..cb3db9fed50d 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -483,6 +483,11 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
/* A namespace node should never get here */
+ ACPI_ERROR((AE_INFO,
+ "Received a namespace node [%4.4s] "
+ "where an operand object is required",
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ internal_object)->name.ascii));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 7e6e1ae6140f..c008589b41bd 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -176,7 +176,7 @@ const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
u64 number = 0;
while (isdigit((int)*string)) {
- number *= 10;
+ acpi_ut_short_multiply(number, 10, &number);
number += *(string++) - '0';
}
@@ -286,7 +286,7 @@ static char *acpi_ut_format_number(char *string,
/* Generate full string in reverse order */
pos = acpi_ut_put_number(reversed_string, number, base, upper);
- i = ACPI_PTR_DIFF(pos, reversed_string);
+ i = (s32)ACPI_PTR_DIFF(pos, reversed_string);
/* Printing 100 using %2d gives "100", not "00" */
@@ -475,7 +475,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
if (!s) {
s = "<NULL>";
}
- length = acpi_ut_bound_string_length(s, precision);
+ length = (s32)acpi_ut_bound_string_length(s, precision);
if (!(type & ACPI_FORMAT_LEFT)) {
while (length < width--) {
pos =
@@ -579,7 +579,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
}
}
- return (ACPI_PTR_DIFF(pos, string));
+ return ((int)ACPI_PTR_DIFF(pos, string));
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 70f78a4bf13b..f9801d13547f 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -237,6 +237,13 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
+ /*
+ * Don't attempt to perform any validation on the 2nd byte.
+ * Although all known ASL compilers insert a zero for the 2nd
+ * byte, it can also be a checksum (as per the ACPI spec),
+ * and this is occasionally seen in the field. July 2017.
+ */
+
/* Return the pointer to the end_tag if requested */
if (!user_function) {
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 64308c304ade..eafabcd2fada 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -226,7 +226,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
void *external_object,
- u16 index)
+ u32 index)
{
union acpi_generic_state *state;
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index f42be01d99fd..9633ee142855 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -276,8 +276,8 @@ static u64 acpi_ut_strtoul_base10(char *string, u32 flags)
/* Convert and insert (add) the decimal digit */
- next_value =
- (return_value * 10) + (ascii_digit - ACPI_ASCII_ZERO);
+ acpi_ut_short_multiply(return_value, 10, &next_value);
+ next_value += (ascii_digit - ACPI_ASCII_ZERO);
/* Check for overflow (32 or 64 bit) - return current converted value */
@@ -335,9 +335,8 @@ static u64 acpi_ut_strtoul_base16(char *string, u32 flags)
/* Convert and insert the hex digit */
- return_value =
- (return_value << 4) |
- acpi_ut_ascii_char_to_hex(ascii_digit);
+ acpi_ut_short_shift_left(return_value, 4, &return_value);
+ return_value |= acpi_ut_ascii_char_to_hex(ascii_digit);
string++;
valid_digits++;
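The two hunks above replace open-coded multiply/shift accumulation with ACPICA's overflow-aware helpers. A minimal standalone sketch of the same pattern, assuming nothing about the helpers' exact signatures (the function below is illustrative and uses a compiler builtin instead):

#include <stdbool.h>
#include <stdint.h>

static bool accumulate_decimal_digit(uint64_t *value, char ascii_digit)
{
	uint64_t scaled;

	/* reject the digit if value * 10 no longer fits in 64 bits */
	if (__builtin_mul_overflow(*value, 10u, &scaled))
		return false;

	*value = scaled + (uint64_t)(ascii_digit - '0');
	return true;
}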
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 9a07a42cae34..3c8de88ecbd5 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -591,6 +591,10 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
return_VOID;
}
+ if (!acpi_gbl_global_list) {
+ goto exit;
+ }
+
element = acpi_gbl_global_list->list_head;
while (element) {
if ((element->component & component) &&
@@ -602,7 +606,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
if (element->size <
sizeof(struct acpi_common_descriptor)) {
- acpi_os_printf("%p Length 0x%04X %9.9s-%u "
+ acpi_os_printf("%p Length 0x%04X %9.9s-%4.4u "
"[Not a Descriptor - too small]\n",
descriptor, element->size,
element->module, element->line);
@@ -612,7 +616,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) !=
ACPI_DESC_TYPE_CACHED) {
acpi_os_printf
- ("%p Length 0x%04X %9.9s-%u [%s] ",
+ ("%p Length 0x%04X %9.9s-%4.4u [%s] ",
descriptor, element->size,
element->module, element->line,
acpi_ut_get_descriptor_name
@@ -705,6 +709,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
element = element->next;
}
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);
/* Print summary */
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index a3215ee671c1..5b9e8dc29c09 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -680,13 +680,36 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
return ret ? NULL : ops;
}
+static int nc_dma_get_range(struct device *dev, u64 *size)
+{
+ struct acpi_iort_node *node;
+ struct acpi_iort_named_component *ncomp;
+
+ node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+ iort_match_node_callback, dev);
+ if (!node)
+ return -ENODEV;
+
+ ncomp = (struct acpi_iort_named_component *)node->node_data;
+
+ *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
+ 1ULL<<ncomp->memory_address_limit;
+
+ return 0;
+}
+
/**
- * iort_set_dma_mask - Set-up dma mask for a device.
+ * iort_dma_setup() - Set-up device DMA parameters.
*
* @dev: device to configure
+ * @dma_addr: device DMA address result pointer
+ * @size: DMA range size result pointer
*/
-void iort_set_dma_mask(struct device *dev)
+void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
+ u64 mask, dmaaddr = 0, size = 0, offset = 0;
+ int ret, msb;
+
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
* setup the correct supported mask.
@@ -700,6 +723,36 @@ void iort_set_dma_mask(struct device *dev)
*/
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
+
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+
+ if (dev_is_pci(dev))
+ ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+ else
+ ret = nc_dma_get_range(dev, &size);
+
+ if (!ret) {
+ msb = fls64(dmaaddr + size - 1);
+ /*
+ * Round-up to the power-of-two mask or set
+ * the mask to the whole 64-bit address space
+ * in case the DMA region covers the full
+ * memory window.
+ */
+ mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
+ /*
+ * Limit coherent and dma mask based on size
+ * retrieved from firmware.
+ */
+ dev->coherent_dma_mask = mask;
+ *dev->dma_mask = mask;
+ }
+
+ *dma_addr = dmaaddr;
+ *dma_size = size;
+
+ dev->dma_pfn_offset = PFN_DOWN(offset);
+ dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
/**
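A standalone sketch of the mask derivation performed in iort_dma_setup() above (plain C; the loop stands in for the kernel's fls64(), and the example numbers are hypothetical):

#include <stdint.h>

static uint64_t dma_mask_for_range(uint64_t dmaaddr, uint64_t size)
{
	uint64_t end = dmaaddr + size - 1;
	unsigned int msb = 0;

	while (end) {		/* 1-based index of the highest set bit */
		msb++;
		end >>= 1;
	}
	return msb == 64 ? UINT64_MAX : (1ULL << msb) - 1;
}

/* Example: dmaaddr = 0 and size = 0x80000000 (a 2 GiB window) give msb = 31,
 * so the mask is 0x7fffffff, just enough to cover the window's last address.
 */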
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index af74b420ec83..59f2f96fdb7e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -995,9 +995,6 @@ void __init acpi_early_init(void)
printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
- /* It's safe to verify table checksums during late stage */
- acpi_gbl_verify_table_checksum = TRUE;
-
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 2ed6935d4483..fbcc73f7a099 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -401,6 +401,8 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (val != ACPI_NOTIFY_DEVICE_WAKE)
return;
+ acpi_handle_debug(handle, "Wake notify\n");
+
adev = acpi_bus_get_acpi_device(handle);
if (!adev)
return;
@@ -409,8 +411,12 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (adev->wakeup.flags.notifier_present) {
pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
- if (adev->wakeup.context.func)
+ if (adev->wakeup.context.func) {
+ acpi_handle_debug(handle, "Running %pF for %s\n",
+ adev->wakeup.context.func,
+ dev_name(adev->wakeup.context.dev));
adev->wakeup.context.func(&adev->wakeup.context);
+ }
}
mutex_unlock(&acpi_pm_notifier_lock);
@@ -682,55 +688,88 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
}
}
+static DEFINE_MUTEX(acpi_wakeup_lock);
+
+static int __acpi_device_wakeup_enable(struct acpi_device *adev,
+ u32 target_state, int max_count)
+{
+ struct acpi_device_wakeup *wakeup = &adev->wakeup;
+ acpi_status status;
+ int error = 0;
+
+ mutex_lock(&acpi_wakeup_lock);
+
+ if (wakeup->enable_count >= max_count)
+ goto out;
+
+ if (wakeup->enable_count > 0)
+ goto inc;
+
+ error = acpi_enable_wakeup_device_power(adev, target_state);
+ if (error)
+ goto out;
+
+ status = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (ACPI_FAILURE(status)) {
+ acpi_disable_wakeup_device_power(adev);
+ error = -EIO;
+ goto out;
+ }
+
+inc:
+ wakeup->enable_count++;
+
+out:
+ mutex_unlock(&acpi_wakeup_lock);
+ return error;
+}
+
/**
- * acpi_device_wakeup - Enable/disable wakeup functionality for device.
- * @adev: ACPI device to enable/disable wakeup functionality for.
+ * acpi_device_wakeup_enable - Enable wakeup functionality for device.
+ * @adev: ACPI device to enable wakeup functionality for.
* @target_state: State the system is transitioning into.
- * @enable: Whether to enable or disable the wakeup functionality.
*
- * Enable/disable the GPE associated with @adev so that it can generate
- * wakeup signals for the device in response to external (remote) events and
- * enable/disable device wakeup power.
+ * Enable the GPE associated with @adev so that it can generate wakeup signals
+ * for the device in response to external (remote) events and enable wakeup
+ * power for it.
+ *
+ * Callers must ensure that @adev is a valid ACPI device node before executing
+ * this function.
+ */
+static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state)
+{
+ return __acpi_device_wakeup_enable(adev, target_state, 1);
+}
+
+/**
+ * acpi_device_wakeup_disable - Disable wakeup functionality for device.
+ * @adev: ACPI device to disable wakeup functionality for.
+ *
+ * Disable the GPE associated with @adev and disable wakeup power for it.
*
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
-static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
- bool enable)
+static void acpi_device_wakeup_disable(struct acpi_device *adev)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
- if (enable) {
- acpi_status res;
- int error;
+ mutex_lock(&acpi_wakeup_lock);
- if (adev->wakeup.flags.enabled)
- return 0;
+ if (!wakeup->enable_count)
+ goto out;
- error = acpi_enable_wakeup_device_power(adev, target_state);
- if (error)
- return error;
+ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ acpi_disable_wakeup_device_power(adev);
- res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- if (ACPI_FAILURE(res)) {
- acpi_disable_wakeup_device_power(adev);
- return -EIO;
- }
- adev->wakeup.flags.enabled = 1;
- } else if (adev->wakeup.flags.enabled) {
- acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- acpi_disable_wakeup_device_power(adev);
- adev->wakeup.flags.enabled = 0;
- }
- return 0;
+ wakeup->enable_count--;
+
+out:
+ mutex_unlock(&acpi_wakeup_lock);
}
-/**
- * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
- * @dev: Device to enable/disable to generate wakeup events.
- * @enable: Whether to enable or disable the wakeup functionality.
- */
-int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
+static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
+ int max_count)
{
struct acpi_device *adev;
int error;
@@ -744,13 +783,41 @@ int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
if (!acpi_device_can_wakeup(adev))
return -EINVAL;
- error = acpi_device_wakeup(adev, acpi_target_system_state(), enable);
+ if (!enable) {
+ acpi_device_wakeup_disable(adev);
+ dev_dbg(dev, "Wakeup disabled by ACPI\n");
+ return 0;
+ }
+
+ error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(),
+ max_count);
if (!error)
- dev_dbg(dev, "Wakeup %s by ACPI\n", enable ? "enabled" : "disabled");
+ dev_dbg(dev, "Wakeup enabled by ACPI\n");
return error;
}
-EXPORT_SYMBOL(acpi_pm_set_device_wakeup);
+
+/**
+ * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
+ * @dev: Device to enable/disable to generate wakeup events.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
+{
+ return __acpi_pm_set_device_wakeup(dev, enable, 1);
+}
+EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup);
+
+/**
+ * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge.
+ * @dev: Bridge device to enable/disable to generate wakeup events.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
+{
+ return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX);
+}
+EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup);
/**
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
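Usage sketch for the two exported wrappers (the call sites and device pointers are illustrative, not from the patch): a leaf device holds at most one wakeup reference, while a bridge shared by several children may be referenced once per child, which is why acpi_pm_set_bridge_wakeup() passes INT_MAX as its count limit.

#include <linux/acpi.h>
#include <linux/device.h>

static void wakeup_refcount_example(struct device *leaf, struct device *bridge)
{
	acpi_pm_set_device_wakeup(leaf, true);    /* enable_count 0 -> 1, GPE armed */
	acpi_pm_set_bridge_wakeup(bridge, true);  /* shared bridge: count may exceed 1 */

	acpi_pm_set_bridge_wakeup(bridge, false); /* drops one reference */
	acpi_pm_set_device_wakeup(leaf, false);   /* count reaches 0, GPE disarmed */
}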
@@ -800,13 +867,15 @@ int acpi_dev_runtime_suspend(struct device *dev)
remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
PM_QOS_FLAGS_NONE;
- error = acpi_device_wakeup(adev, ACPI_STATE_S0, remote_wakeup);
- if (remote_wakeup && error)
- return -EAGAIN;
+ if (remote_wakeup) {
+ error = acpi_device_wakeup_enable(adev, ACPI_STATE_S0);
+ if (error)
+ return -EAGAIN;
+ }
error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
- if (error)
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ if (error && remote_wakeup)
+ acpi_device_wakeup_disable(adev);
return error;
}
@@ -829,7 +898,7 @@ int acpi_dev_runtime_resume(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
@@ -884,13 +953,15 @@ int acpi_dev_suspend_late(struct device *dev)
target_state = acpi_target_system_state();
wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
- error = acpi_device_wakeup(adev, target_state, wakeup);
- if (wakeup && error)
- return error;
+ if (wakeup) {
+ error = acpi_device_wakeup_enable(adev, target_state);
+ if (error)
+ return error;
+ }
error = acpi_dev_pm_low_power(dev, adev, target_state);
- if (error)
- acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
+ if (error && wakeup)
+ acpi_device_wakeup_disable(adev);
return error;
}
@@ -913,7 +984,7 @@ int acpi_dev_resume_early(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
+ acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
@@ -1056,7 +1127,7 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off)
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
}
}
@@ -1100,7 +1171,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
dev_pm_domain_set(dev, &acpi_general_pm_domain);
if (power_on) {
acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
}
dev->pm_domain->detach = acpi_dev_pm_detach;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 62068a5e814f..fdfae6f3c0b1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -112,8 +112,7 @@ enum {
EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
- EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the
- * current command processing */
+ EC_FLAGS_GPE_MASKED, /* GPE masked */
};
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
@@ -425,19 +424,19 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
wake_up(&ec->wait);
}
-static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_mask_gpe(struct acpi_ec *ec)
{
- if (!test_bit(flag, &ec->flags)) {
+ if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
acpi_ec_disable_gpe(ec, false);
ec_dbg_drv("Polling enabled");
- set_bit(flag, &ec->flags);
+ set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
}
}
-static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
{
- if (test_bit(flag, &ec->flags)) {
- clear_bit(flag, &ec->flags);
+ if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
+ clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
acpi_ec_enable_gpe(ec, false);
ec_dbg_drv("Polling disabled");
}
@@ -464,7 +463,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
- acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_mask_gpe(ec);
if (!acpi_ec_event_enabled(ec))
return;
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
@@ -480,7 +479,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
ec_dbg_evt("Command(%s) unblocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_unmask_gpe(ec);
}
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
@@ -700,7 +699,7 @@ err:
++t->irq_count;
/* Allow triggering on 0 threshold */
if (t->irq_count == ec_storm_threshold)
- acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_mask_gpe(ec);
}
}
out:
@@ -798,7 +797,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
- acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_unmask_gpe(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
@@ -1586,9 +1585,7 @@ static bool acpi_is_boot_ec(struct acpi_ec *ec)
{
if (!boot_ec)
return false;
- if (ec->handle == boot_ec->handle &&
- ec->gpe == boot_ec->gpe &&
- ec->command_addr == boot_ec->command_addr &&
+ if (ec->command_addr == boot_ec->command_addr &&
ec->data_addr == boot_ec->data_addr)
return true;
return false;
@@ -1613,6 +1610,13 @@ static int acpi_ec_add(struct acpi_device *device)
if (acpi_is_boot_ec(ec)) {
boot_ec_is_ecdt = false;
+ /*
+ * Trust PNP0C09 namespace location rather than ECDT ID.
+ *
+ * But trust ECDT GPE rather than _GPE because of ASUS quirks,
+ * so do not change boot_ec->gpe to ec->gpe.
+ */
+ boot_ec->handle = ec->handle;
acpi_handle_debug(ec->handle, "duplicated.\n");
acpi_ec_free(ec);
ec = boot_ec;
@@ -1741,24 +1745,26 @@ error:
* functioning ECDT EC first in order to handle the events.
* https://bugzilla.kernel.org/show_bug.cgi?id=115021
*/
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
{
acpi_handle handle;
if (!boot_ec)
return -ENODEV;
- /*
- * The DSDT EC should have already been started in
- * acpi_ec_add().
- */
+ /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
if (!boot_ec_is_ecdt)
return -ENODEV;
/*
* At this point, the namespace and the GPE is initialized, so
* start to find the namespace objects and handle the events.
+ *
+ * Note: ec->handle can be valid if this function is called after
+ * acpi_ec_add(), hence the fast path.
*/
- if (!acpi_ec_ecdt_get_handle(&handle))
+ if (boot_ec->handle != ACPI_ROOT_OBJECT)
+ handle = boot_ec->handle;
+ else if (!acpi_ec_ecdt_get_handle(&handle))
return -ENODEV;
return acpi_config_boot_ec(boot_ec, handle, true, true);
}
@@ -2003,20 +2009,17 @@ static inline void acpi_ec_query_exit(void)
int __init acpi_ec_init(void)
{
int result;
+ int ecdt_fail, dsdt_fail;
/* register workqueue for _Qxx evaluations */
result = acpi_ec_query_init();
if (result)
- goto err_exit;
- /* Now register the driver for the EC */
- result = acpi_bus_register_driver(&acpi_ec_driver);
- if (result)
- goto err_exit;
+ return result;
-err_exit:
- if (result)
- acpi_ec_query_exit();
- return result;
+ /* Drivers must be started after acpi_ec_query_init() */
+ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+ ecdt_fail = acpi_ec_ecdt_start();
+ return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}
/* EC driver currently not unloadable */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 58dd7ab3c653..4361c4415b4f 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
@@ -233,6 +232,12 @@ static inline void suspend_nvs_restore(void) {}
void acpi_init_properties(struct acpi_device *adev);
void acpi_free_properties(struct acpi_device *adev);
+#ifdef CONFIG_X86
+void acpi_extract_apple_properties(struct acpi_device *adev);
+#else
+static inline void acpi_extract_apple_properties(struct acpi_device *adev) {}
+#endif
+
/*--------------------------------------------------------------------------
Watchdog
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index 723bee58bbcf..19cdd8a783a9 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include "internal.h"
@@ -257,12 +258,11 @@ bool acpi_osi_is_win8(void)
}
EXPORT_SYMBOL(acpi_osi_is_win8);
-static void __init acpi_osi_dmi_darwin(bool enable,
- const struct dmi_system_id *d)
+static void __init acpi_osi_dmi_darwin(void)
{
- pr_notice("DMI detected to setup _OSI(\"Darwin\"): %s\n", d->ident);
+ pr_notice("DMI detected to setup _OSI(\"Darwin\"): Apple hardware\n");
osi_config.darwin_dmi = 1;
- __acpi_osi_setup_darwin(enable);
+ __acpi_osi_setup_darwin(true);
}
static void __init acpi_osi_dmi_linux(bool enable,
@@ -273,13 +273,6 @@ static void __init acpi_osi_dmi_linux(bool enable,
__acpi_osi_setup_linux(enable);
}
-static int __init dmi_enable_osi_darwin(const struct dmi_system_id *d)
-{
- acpi_osi_dmi_darwin(true, d);
-
- return 0;
-}
-
static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
{
acpi_osi_dmi_linux(true, d);
@@ -481,30 +474,16 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
},
},
-
- /*
- * Enable _OSI("Darwin") for all apple platforms.
- */
- {
- .callback = dmi_enable_osi_darwin,
- .ident = "Apple hardware",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- },
- },
- {
- .callback = dmi_enable_osi_darwin,
- .ident = "Apple hardware",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- },
- },
{}
};
static __init void acpi_osi_dmi_blacklisted(void)
{
dmi_check_system(acpi_osi_dmi_table);
+
+ /* Enable _OSI("Darwin") for Apple platforms. */
+ if (x86_apple_machine)
+ acpi_osi_dmi_darwin();
}
int __init early_acpi_osi_init(void)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 9eec3095e6c3..6fc204a52493 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include <acpi/apei.h> /* for acpi_hest_init() */
#include "internal.h"
@@ -431,8 +432,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
* been called successfully. We know the feature set supported by the
* platform, so avoid calling _OSC at all
*/
-
- if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) {
+ if (x86_apple_machine) {
root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
decode_osc_control(root, "OS assumes control of",
root->osc_control_set);
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 3b7d5be5b7ed..6c99d3f81095 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -27,6 +27,9 @@
#define GPI1_LDO_ON (3 << 0)
#define GPI1_LDO_OFF (4 << 0)
+#define AXP288_ADC_TS_PIN_GPADC 0xf2
+#define AXP288_ADC_TS_PIN_ON 0xf3
+
static struct pmic_table power_table[] = {
{
.address = 0x00,
@@ -209,11 +212,23 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
u8 buf[2];
+ int ret;
- if (regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2))
- return -EIO;
+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_PIN_GPADC);
+ if (ret)
+ return ret;
+
+ /* After switching to the GPADC pin give things some time to settle */
+ usleep_range(6000, 10000);
+
+ ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
+ if (ret == 0)
+ ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
+
+ regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
- return (buf[0] << 4) + ((buf[1] >> 4) & 0x0F);
+ return ret;
}
static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 591d1dd3f04e..9d6aff22684e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -237,7 +237,7 @@ static int __acpi_processor_start(struct acpi_device *device)
result = acpi_cppc_processor_probe(pr);
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
- dev_warn(&device->dev, "CPPC data invalid or not present\n");
+ dev_dbg(&device->dev, "CPPC data invalid or not present\n");
if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 917c789f953d..09b7974b6c62 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -339,6 +339,9 @@ void acpi_init_properties(struct acpi_device *adev)
INIT_LIST_HEAD(&adev->data.subnodes);
+ if (!adev->handle)
+ return;
+
/*
* Check if ACPI_DT_NAMESPACE_HID is present and in that case we fill in
* Device Tree compatible properties for this device.
@@ -373,6 +376,9 @@ void acpi_init_properties(struct acpi_device *adev)
if (acpi_of && !adev->flags.of_compatible_ok)
acpi_handle_info(adev->handle,
ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n");
+
+ if (!adev->data.pointer)
+ acpi_extract_apple_properties(adev);
}
static void acpi_destroy_nondev_subnodes(struct list_head *list)
@@ -1047,7 +1053,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
fwnode_for_each_child_node(fwnode, child) {
u32 nr;
- if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
+ if (fwnode_property_read_u32(child, prop_name, &nr))
continue;
if (val == nr)
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index cd4c4271dc4c..d85e010ee2cc 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -573,6 +573,35 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
return AE_OK;
}
+static int __acpi_dev_get_resources(struct acpi_device *adev,
+ struct list_head *list,
+ int (*preproc)(struct acpi_resource *, void *),
+ void *preproc_data, char *method)
+{
+ struct res_proc_context c;
+ acpi_status status;
+
+ if (!adev || !adev->handle || !list_empty(list))
+ return -EINVAL;
+
+ if (!acpi_has_method(adev->handle, method))
+ return 0;
+
+ c.list = list;
+ c.preproc = preproc;
+ c.preproc_data = preproc_data;
+ c.count = 0;
+ c.error = 0;
+ status = acpi_walk_resources(adev->handle, method,
+ acpi_dev_process_resource, &c);
+ if (ACPI_FAILURE(status)) {
+ acpi_dev_free_resource_list(list);
+ return c.error ? c.error : -EIO;
+ }
+
+ return c.count;
+}
+
/**
* acpi_dev_get_resources - Get current resources of a device.
* @adev: ACPI device node to get the resources for.
@@ -601,30 +630,45 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data)
{
- struct res_proc_context c;
- acpi_status status;
+ return __acpi_dev_get_resources(adev, list, preproc, preproc_data,
+ METHOD_NAME__CRS);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
- if (!adev || !adev->handle || !list_empty(list))
- return -EINVAL;
+static int is_memory(struct acpi_resource *ares, void *not_used)
+{
+ struct resource_win win;
+ struct resource *res = &win.res;
- if (!acpi_has_method(adev->handle, METHOD_NAME__CRS))
- return 0;
+ memset(&win, 0, sizeof(win));
- c.list = list;
- c.preproc = preproc;
- c.preproc_data = preproc_data;
- c.count = 0;
- c.error = 0;
- status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
- acpi_dev_process_resource, &c);
- if (ACPI_FAILURE(status)) {
- acpi_dev_free_resource_list(list);
- return c.error ? c.error : -EIO;
- }
+ return !(acpi_dev_resource_memory(ares, res)
+ || acpi_dev_resource_address_space(ares, &win)
+ || acpi_dev_resource_ext_address_space(ares, &win));
+}
- return c.count;
+/**
+ * acpi_dev_get_dma_resources - Get current DMA resources of a device.
+ * @adev: ACPI device node to get the resources for.
+ * @list: Head of the resultant list of resources (must be empty).
+ *
+ * Evaluate the _DMA method for the given device node and process its
+ * output.
+ *
+ * The resultant struct resource objects are put on the list pointed to
+ * by @list, that must be empty initially, as members of struct
+ * resource_entry objects. Callers of this routine should use
+ * %acpi_dev_free_resource_list() to free that list.
+ *
+ * The number of resources in the output list is returned on success,
+ * an error code reflecting the error condition is returned otherwise.
+ */
+int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
+{
+ return __acpi_dev_get_resources(adev, list, is_memory, NULL,
+ METHOD_NAME__DMA);
}
-EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
+EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);
/**
* acpi_dev_filter_resource_type - Filter ACPI resource according to resource
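Usage sketch for the new acpi_dev_get_dma_resources() helper (the caller and the log line are illustrative): evaluate _DMA, walk the resulting resource_entry list, then release it with acpi_dev_free_resource_list().

#include <linux/acpi.h>
#include <linux/resource_ext.h>

static void dma_resources_example(struct acpi_device *adev)
{
	struct resource_entry *rentry;
	LIST_HEAD(list);
	int count;

	count = acpi_dev_get_dma_resources(adev, &list);
	if (count <= 0)
		return;		/* no _DMA method, or an error code */

	list_for_each_entry(rentry, &list, node)
		pr_info("DMA window %pR, offset %llu\n",
			rentry->res, (unsigned long long)rentry->offset);

	acpi_dev_free_resource_list(&list);
}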
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index ad0b13ad4bbb..a18463799ad7 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -31,7 +31,7 @@
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/power_supply.h>
-#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include "sbshc.h"
#include "battery.h"
@@ -58,8 +58,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-static bool sbs_manager_broken;
-
#define MAX_SBS_BAT 4
#define ACPI_SBS_BLOCK_MAX 32
@@ -632,31 +630,12 @@ static void acpi_sbs_callback(void *context)
}
}
-static int disable_sbs_manager(const struct dmi_system_id *d)
-{
- sbs_manager_broken = true;
- return 0;
-}
-
-static struct dmi_system_id acpi_sbs_dmi_table[] = {
- {
- .callback = disable_sbs_manager,
- .ident = "Apple",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.")
- },
- },
- { },
-};
-
static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs;
int result = 0;
int id;
- dmi_check_system(acpi_sbs_dmi_table);
-
sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
if (!sbs) {
result = -ENOMEM;
@@ -677,7 +656,7 @@ static int acpi_sbs_add(struct acpi_device *device)
result = 0;
- if (!sbs_manager_broken) {
+ if (!x86_apple_machine) {
result = acpi_manager_get_info(sbs);
if (!result) {
sbs->manager_present = 1;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 33897298f03e..ac8a8a52ee4a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -13,6 +13,7 @@
#include <linux/dmi.h>
#include <linux/nls.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/x86/apple.h>
#include <asm/pgtable.h>
@@ -1360,6 +1361,85 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
}
/**
+ * acpi_dma_get_range() - Get device DMA parameters.
+ *
+ * @dev: device to configure
+ * @dma_addr: pointer device DMA address result
+ * @offset: pointer to the DMA offset result
+ * @size: pointer to DMA range size result
+ *
+ * Evaluate DMA regions and return respectively DMA region start, offset
+ * and size in dma_addr, offset and size on parsing success; it does not
+ * update the passed in values on failure.
+ *
+ * Return 0 on success, < 0 on failure.
+ */
+int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
+ u64 *size)
+{
+ struct acpi_device *adev;
+ LIST_HEAD(list);
+ struct resource_entry *rentry;
+ int ret;
+ struct device *dma_dev = dev;
+ u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
+
+ /*
+ * Walk the device tree chasing an ACPI companion with a _DMA
+ * object while we go. Stop if we find a device with an ACPI
+ * companion containing a _DMA method.
+ */
+ do {
+ adev = ACPI_COMPANION(dma_dev);
+ if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
+ break;
+
+ dma_dev = dma_dev->parent;
+ } while (dma_dev);
+
+ if (!dma_dev)
+ return -ENODEV;
+
+ if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
+ acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
+ return -EINVAL;
+ }
+
+ ret = acpi_dev_get_dma_resources(adev, &list);
+ if (ret > 0) {
+ list_for_each_entry(rentry, &list, node) {
+ if (dma_offset && rentry->offset != dma_offset) {
+ ret = -EINVAL;
+ dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
+ goto out;
+ }
+ dma_offset = rentry->offset;
+
+ /* Take lower and upper limits */
+ if (rentry->res->start < dma_start)
+ dma_start = rentry->res->start;
+ if (rentry->res->end > dma_end)
+ dma_end = rentry->res->end;
+ }
+
+ if (dma_start >= dma_end) {
+ ret = -EINVAL;
+ dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
+ goto out;
+ }
+
+ *dma_addr = dma_start - dma_offset;
+ len = dma_end - dma_start;
+ *size = max(len, len + 1);
+ *offset = dma_offset;
+ }
+ out:
+ acpi_dev_free_resource_list(&list);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+/**
* acpi_dma_configure - Set-up DMA configuration for the device.
* @dev: The pointer to the device
* @attr: device dma attributes
@@ -1367,20 +1447,16 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
const struct iommu_ops *iommu;
- u64 size;
+ u64 dma_addr = 0, size = 0;
- iort_set_dma_mask(dev);
+ iort_dma_setup(dev, &dma_addr, &size);
iommu = iort_iommu_configure(dev);
if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
- /*
- * Assume dma valid range starts at 0 and covers the whole
- * coherent_dma_mask.
- */
- arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT);
+ arch_setup_dma_ops(dev, dma_addr, size,
+ iommu, attr == DEV_DMA_COHERENT);
return 0;
}
@@ -1452,6 +1528,12 @@ static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
struct list_head resource_list;
bool is_spi_i2c_slave = false;
+ /* Macs use device properties in lieu of _CRS resources */
+ if (x86_apple_machine &&
+ (fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
+ fwnode_property_present(&device->fwnode, "i2cAddress")))
+ return true;
+
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
&is_spi_i2c_slave);
@@ -2058,6 +2140,9 @@ int __init acpi_scan_init(void)
acpi_get_spcr_uart_addr();
}
+ acpi_gpe_apply_masked_gpes();
+ acpi_update_all_gpes();
+
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
@@ -2082,10 +2167,6 @@ int __init acpi_scan_init(void)
}
}
- acpi_gpe_apply_masked_gpes();
- acpi_update_all_gpes();
- acpi_ec_ecdt_start();
-
acpi_scan_initialized = true;
out:
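Worked example for the arithmetic in acpi_dma_get_range() above (the firmware-provided window is hypothetical):

/*
 *   _DMA window:  res->start = 0x80000000, res->end = 0xffffffff,
 *                 offset     = 0x80000000
 *
 *   dma_start = 0x80000000, dma_end = 0xffffffff
 *   *dma_addr = dma_start - offset  = 0x00000000   (device-visible base)
 *   len       = dma_end - dma_start = 0x7fffffff
 *   *size     = max(len, len + 1)   = 0x80000000   (max() guards against the
 *                                                   wrap when len == U64_MAX)
 */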
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fa8243c5c062..dd5f21ca483e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -870,7 +870,7 @@ static struct syscore_ops acpi_sleep_syscore_ops = {
.resume = acpi_restore_bm_rld,
};
-void acpi_sleep_syscore_init(void)
+static void acpi_sleep_syscore_init(void)
{
register_syscore_ops(&acpi_sleep_syscore_ops);
}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 4ac3e06b41d8..e94b2023eed5 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -17,6 +17,16 @@
#include <linux/serial_core.h>
/*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
+/*
* Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
* Detect them by examining the OEM fields in the SPCR header, similar to PCI
* quirk detection in pci_mcfg.c.
@@ -95,16 +105,17 @@ int __init parse_spcr(bool earlycon)
}
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- switch (table->serial_port.access_width) {
+ switch (ACPI_ACCESS_BIT_WIDTH((
+ table->serial_port.access_width))) {
default:
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
- case ACPI_ACCESS_SIZE_BYTE:
+ case 8:
iotype = "mmio";
break;
- case ACPI_ACCESS_SIZE_WORD:
+ case 16:
iotype = "mmio16";
break;
- case ACPI_ACCESS_SIZE_DWORD:
+ case 32:
iotype = "mmio32";
break;
}
@@ -147,8 +158,30 @@ int __init parse_spcr(bool earlycon)
goto done;
}
- if (qdf2400_erratum_44_present(&table->header))
- uart = "qdf2400_e44";
+ /*
+ * If the E44 erratum is required, then we need to tell the pl011
+ * driver to implement the work-around.
+ *
+ * The global variable is used by the probe function when it
+ * creates the UARTs, whether or not they're used as a console.
+ *
+ * If the user specifies "traditional" earlycon, the qdf2400_e44
+ * console name matches the EARLYCON_DECLARE() statement, and
+ * SPCR is not used. Parameter "earlycon" is false.
+ *
+ * If the user specifies "SPCR" earlycon, then we need to update
+ * the console name so that it also says "qdf2400_e44". Parameter
+ * "earlycon" is true.
+ *
+ * For consistency, if we change the console name, then we do it
+ * for everyone, not just earlycon.
+ */
+ if (qdf2400_erratum_44_present(&table->header)) {
+ qdf2400_e44_present = true;
+ if (earlycon)
+ uart = "qdf2400_e44";
+ }
+
if (xgene_8250_erratum_present(table))
iotype = "mmio32";
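For reference, the mapping the rewritten switch relies on, assuming the usual ACPICA definition ACPI_ACCESS_BIT_WIDTH(code) == 1 << (code + 2) for the GAS access-size codes:

/*
 *   code 1 -> 8 bits  -> "mmio"
 *   code 2 -> 16 bits -> "mmio16"
 *   code 3 -> 32 bits -> "mmio32"
 *   anything else hits the default case and is treated as byte-wide.
 */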
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index e414fabf7315..32d7e43b64c4 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -2,6 +2,8 @@
* sysfs.c - ACPI sysfs interface to userspace.
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
@@ -552,11 +554,15 @@ static void fixed_event_count(u32 event_number)
static void acpi_global_event_handler(u32 event_type, acpi_handle device,
u32 event_number, void *context)
{
- if (event_type == ACPI_EVENT_TYPE_GPE)
+ if (event_type == ACPI_EVENT_TYPE_GPE) {
gpe_count(event_number);
-
- if (event_type == ACPI_EVENT_TYPE_FIXED)
+ pr_debug("GPE event 0x%02x\n", event_number);
+ } else if (event_type == ACPI_EVENT_TYPE_FIXED) {
fixed_event_count(event_number);
+ pr_debug("Fixed event 0x%02x\n", event_number);
+ } else {
+ pr_debug("Other event 0x%02x\n", event_number);
+ }
}
static int get_status(u32 index, acpi_event_status *status,
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index ff425390bfa8..80ce2a7d224b 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -740,10 +740,10 @@ int __init acpi_table_init(void)
if (acpi_verify_table_checksum) {
pr_info("Early table checksum verification enabled\n");
- acpi_gbl_verify_table_checksum = TRUE;
+ acpi_gbl_enable_table_validation = TRUE;
} else {
pr_info("Early table checksum verification disabled\n");
- acpi_gbl_verify_table_checksum = FALSE;
+ acpi_gbl_enable_table_validation = FALSE;
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
new file mode 100644
index 000000000000..51b4cf9f25da
--- /dev/null
+++ b/drivers/acpi/x86/apple.c
@@ -0,0 +1,141 @@
+/*
+ * apple.c - Apple ACPI quirks
+ * Copyright (C) 2017 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/uuid.h>
+
+/* Apple _DSM device properties GUID */
+static const guid_t apple_prp_guid =
+ GUID_INIT(0xa0b5b7c6, 0x1318, 0x441c,
+ 0xb0, 0xc9, 0xfe, 0x69, 0x5e, 0xaf, 0x94, 0x9b);
+
+/**
+ * acpi_extract_apple_properties - retrieve and convert Apple _DSM properties
+ * @adev: ACPI device for which to retrieve the properties
+ *
+ * Invoke Apple's custom _DSM once to check the protocol version and once more
+ * to retrieve the properties. They are marshalled up in a single package as
+ * alternating key/value elements, unlike _DSD which stores them as a package
+ * of 2-element packages. Convert to _DSD format and make them available under
+ * the primary fwnode.
+ */
+void acpi_extract_apple_properties(struct acpi_device *adev)
+{
+ unsigned int i, j = 0, newsize = 0, numprops, numvalid;
+ union acpi_object *props, *newprops;
+ unsigned long *valid = NULL;
+ void *free_space;
+
+ if (!x86_apple_machine)
+ return;
+
+ props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 0,
+ NULL, ACPI_TYPE_BUFFER);
+ if (!props)
+ return;
+
+ if (!props->buffer.length)
+ goto out_free;
+
+ if (props->buffer.pointer[0] != 3) {
+ acpi_handle_info(adev->handle, FW_INFO
+ "unsupported properties version %*ph\n",
+ props->buffer.length, props->buffer.pointer);
+ goto out_free;
+ }
+
+ ACPI_FREE(props);
+ props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 1,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!props)
+ return;
+
+ numprops = props->package.count / 2;
+ if (!numprops)
+ goto out_free;
+
+ valid = kcalloc(BITS_TO_LONGS(numprops), sizeof(long), GFP_KERNEL);
+ if (!valid)
+ goto out_free;
+
+ /* newsize = key length + value length of each tuple */
+ for (i = 0; i < numprops; i++) {
+ union acpi_object *key = &props->package.elements[i * 2];
+ union acpi_object *val = &props->package.elements[i * 2 + 1];
+
+ if (key->type != ACPI_TYPE_STRING ||
+ (val->type != ACPI_TYPE_INTEGER &&
+ val->type != ACPI_TYPE_BUFFER))
+ continue; /* skip invalid properties */
+
+ __set_bit(i, valid);
+ newsize += key->string.length + 1;
+ if (val->type == ACPI_TYPE_BUFFER)
+ newsize += val->buffer.length;
+ }
+
+ numvalid = bitmap_weight(valid, numprops);
+ if (numprops > numvalid)
+ acpi_handle_info(adev->handle, FW_INFO
+ "skipped %u properties: wrong type\n",
+ numprops - numvalid);
+ if (numvalid == 0)
+ goto out_free;
+
+ /* newsize += top-level package + 3 objects for each key/value tuple */
+ newsize += (1 + 3 * numvalid) * sizeof(union acpi_object);
+ newprops = ACPI_ALLOCATE_ZEROED(newsize);
+ if (!newprops)
+ goto out_free;
+
+ /* layout: top-level package | packages | key/value tuples | strings */
+ newprops->type = ACPI_TYPE_PACKAGE;
+ newprops->package.count = numvalid;
+ newprops->package.elements = &newprops[1];
+ free_space = &newprops[1 + 3 * numvalid];
+
+ for_each_set_bit(i, valid, numprops) {
+ union acpi_object *key = &props->package.elements[i * 2];
+ union acpi_object *val = &props->package.elements[i * 2 + 1];
+ unsigned int k = 1 + numvalid + j * 2; /* index into newprops */
+ unsigned int v = k + 1;
+
+ newprops[1 + j].type = ACPI_TYPE_PACKAGE;
+ newprops[1 + j].package.count = 2;
+ newprops[1 + j].package.elements = &newprops[k];
+
+ newprops[k].type = ACPI_TYPE_STRING;
+ newprops[k].string.length = key->string.length;
+ newprops[k].string.pointer = free_space;
+ memcpy(free_space, key->string.pointer, key->string.length);
+ free_space += key->string.length + 1;
+
+ newprops[v].type = val->type;
+ if (val->type == ACPI_TYPE_INTEGER) {
+ newprops[v].integer.value = val->integer.value;
+ } else {
+ newprops[v].buffer.length = val->buffer.length;
+ newprops[v].buffer.pointer = free_space;
+ memcpy(free_space, val->buffer.pointer,
+ val->buffer.length);
+ free_space += val->buffer.length;
+ }
+ j++; /* count valid properties */
+ }
+ WARN_ON(free_space != (void *)newprops + newsize);
+
+ adev->data.properties = newprops;
+ adev->data.pointer = newprops;
+
+out_free:
+ ACPI_FREE(props);
+ kfree(valid);
+}
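Shape of the converted result produced above, shown for two valid key/value tuples (the property count is hypothetical; indices follow the k = 1 + numvalid + j * 2 layout in the code):

/*
 *   newprops[0]      top-level Package, count == numvalid (2 here)
 *   newprops[1..2]   one 2-element Package per property
 *   newprops[3..6]   the key Strings and value Integers/Buffers
 *   trailing bytes   copies of the key strings and buffer contents
 *
 * i.e. the same layout the generic _DSD property code expects to find in
 * adev->data.properties.
 */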
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f7665c31feca..831cdd7d197d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
const char *failure_string;
struct binder_buffer *buffer;
- if (proc->tsk != current)
+ if (proc->tsk != current->group_leader)
return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M)
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 1a50cd3b4233..9b34dff64536 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev)
return rc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
+ if (!res) {
+ rc = -ENODEV;
goto disable_resources;
+ }
pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
- if (!pwrdn_reg)
+ if (!pwrdn_reg) {
+ rc = -ENOMEM;
goto disable_resources;
+ }
da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fa7dd4394c02..1945a8ea2099 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev)
u64 trusted_cap;
unsigned int err;
+ if (!ata_id_has_trusted(dev->id))
+ return;
+
if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
ata_dev_warn(dev,
"Security Log not supported\n");
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9f907eedbf7..bfbe1e154128 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,7 +30,6 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
-#include <linux/swait.h>
#include <generated/utsrelease.h>
@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
* state of the firmware loading.
*/
struct fw_state {
- struct swait_queue_head wq;
+ struct completion completion;
enum fw_status status;
};
static void fw_state_init(struct fw_state *fw_st)
{
- init_swait_queue_head(&fw_st->wq);
+ init_completion(&fw_st->completion);
fw_st->status = FW_STATUS_UNKNOWN;
}
@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
{
long ret;
- ret = swait_event_interruptible_timeout(fw_st->wq,
- __fw_state_is_done(READ_ONCE(fw_st->status)),
- timeout);
+ ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
return -ENOENT;
if (!ret)
@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
WRITE_ONCE(fw_st->status, status);
if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
- swake_up(&fw_st->wq);
+ complete_all(&fw_st->completion);
}
#define fw_state_start(fw_st) \
__fw_state_set(fw_st, FW_STATUS_LOADING)
#define fw_state_done(fw_st) \
__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait(fw_st) \
__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st) false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
{
return fw_st->status == status;
}
+#define fw_state_is_aborted(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
#define fw_state_aborted(fw_st) \
__fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_is_done(fw_st) \
__fw_state_check(fw_st, FW_STATUS_DONE)
#define fw_state_is_loading(fw_st) \
__fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st) \
- __fw_state_check(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait_timeout(fw_st, timeout) \
__fw_state_wait_common(fw_st, timeout)
@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return 1; /* need to load */
}
+/*
+ * Batched requests need only one wake, we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+ struct firmware_buf *buf;
+
+ /* Loaded directly? */
+ if (!fw || !fw->priv)
+ return;
+
+ buf = fw->priv;
+ if (!fw_state_is_aborted(&buf->fw_st))
+ fw_state_aborted(&buf->fw_st);
+}
+
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
out:
if (ret < 0) {
+ fw_abort_batch_reqs(fw);
release_firmware(fw);
fw = NULL;
}
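The switch from swait to a completion relies on the standard return convention of wait_for_completion_killable_timeout(), summarized here for context; this is why __fw_state_wait_common() checks the aborted status for any non-zero return before mapping 0 to -ETIMEDOUT.

/*
 *   ret > 0   completion fired, ret = jiffies left before the timeout
 *   ret == 0  the wait timed out
 *   ret < 0   -ERESTARTSYS, the waiting task received a fatal signal
 */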
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ef8334949b42..f321b96405f5 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
}
static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
- loff_t logical_blocksize)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
@@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
lo->lo_offset = offset;
if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit;
- if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
- lo->lo_logical_blocksize = logical_blocksize;
- blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
- blk_queue_logical_block_size(lo->lo_queue,
- lo->lo_logical_blocksize);
- }
set_capacity(lo->lo_disk, x);
bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
/* let user-space know about the new size */
@@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
- int lo_bits = 9;
/*
* We use punch hole to reclaim the free space used by the
@@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
- if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
- lo_bits = blksize_bits(lo->lo_logical_blocksize);
- blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
+ blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+ blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
@@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->use_dio = false;
lo->lo_blocksize = lo_blocksize;
- lo->lo_logical_blocksize = 512;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
int err;
struct loop_func_table *xfer;
kuid_t uid = current_uid();
- int lo_flags = lo->lo_flags;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
@@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (err)
goto exit;
- if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
- if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
- lo->lo_logical_blocksize = 512;
- lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
- if (LO_INFO_BLOCKSIZE(info) != 512 &&
- LO_INFO_BLOCKSIZE(info) != 1024 &&
- LO_INFO_BLOCKSIZE(info) != 2048 &&
- LO_INFO_BLOCKSIZE(info) != 4096)
- return -EINVAL;
- if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
- return -EINVAL;
- }
-
if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit ||
- lo->lo_flags != lo_flags ||
- ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
- lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
- LO_INFO_BLOCKSIZE(info))) {
+ lo->lo_sizelimit != info->lo_sizelimit) {
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto exit;
}
@@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
- lo->lo_logical_blocksize);
+ return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 2c096b9a17b8..fecd3f97ef8c 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -49,7 +49,6 @@ struct loop_device {
struct file * lo_backing_file;
struct block_device *lo_device;
unsigned lo_blocksize;
- unsigned lo_logical_blocksize;
void *key_data;
gfp_t old_gfp_mask;
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6b16ead1da58..ad9749463d4f 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -875,6 +875,56 @@ static void print_version(void)
printk(KERN_INFO "%s", version);
}
+struct vdc_check_port_data {
+ int dev_no;
+ char *type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct vdc_check_port_data *port_data;
+
+ port_data = (struct vdc_check_port_data *)arg;
+
+ if ((vdev->dev_no == port_data->dev_no) &&
+ (!(strcmp((char *)&vdev->type, port_data->type))) &&
+ dev_get_drvdata(dev)) {
+ /* This device has already been configured
+ * by vdc_port_probe()
+ */
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+ struct vdc_check_port_data port_data;
+ struct device *dev;
+
+ port_data.dev_no = vdev->dev_no;
+ port_data.type = (char *)&vdev->type;
+
+ dev = device_find_child(vdev->dev.parent, &port_data,
+ vdc_device_probed);
+
+ if (dev)
+ return true;
+
+ return false;
+}
+
static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto err_out_release_mdesc;
}
+ /* Check if this device is part of an mpgroup */
+ if (vdc_port_mpgroup_check(vdev)) {
+ printk(KERN_WARNING
+ "VIO: Ignoring extra vdisk port %s",
+ dev_name(&vdev->dev));
+ goto err_out_release_mdesc;
+ }
+
port = kzalloc(sizeof(*port), GFP_KERNEL);
err = -ENOMEM;
if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_tx_ring;
+ /* Note that the device driver_data is used to determine
+ * whether the port has been probed.
+ */
dev_set_drvdata(&vdev->dev, port);
mdesc_release(hp);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 1498b899a593..d3d5523862c2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
char *envp[] = { "RESIZE=1", NULL };
+ unsigned long long nblocks;
u64 capacity;
/* Host must always specify the capacity. */
@@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
capacity = (sector_t)-1;
}
- string_get_size(capacity, queue_logical_block_size(q),
+ nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
+
+ string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
- string_get_size(capacity, queue_logical_block_size(q),
+ string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
- "new size: %llu %d-byte logical blocks (%s/%s)\n",
- (unsigned long long)capacity,
- queue_logical_block_size(q),
- cap_str_10, cap_str_2);
+ "new size: %llu %d-byte logical blocks (%s/%s)\n",
+ nblocks,
+ queue_logical_block_size(q),
+ cap_str_10,
+ cap_str_2);
set_capacity(vblk->disk, capacity);
revalidate_disk(vblk->disk);
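Worked example for the capacity conversion above (numbers hypothetical): the virtio capacity is reported in 512-byte sectors, so with a 4096-byte logical block size

/*
 *   nblocks = DIV_ROUND_UP_ULL(8000000, 4096 >> 9)
 *           = DIV_ROUND_UP_ULL(8000000, 8) = 1000000
 */

and the resize notice now reports 1000000 4096-byte blocks rather than labelling 8000000 sectors as 4096-byte blocks.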
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 792da683e70d..2adb8599be93 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
struct pending_req *req, *n;
unsigned int j, r;
+ bool busy = false;
for (r = 0; r < blkif->nr_rings; r++) {
struct xen_blkif_ring *ring = &blkif->rings[r];
@@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
* don't have any discard_io or other_io requests. So, checking
* for inflight IO is enough.
*/
- if (atomic_read(&ring->inflight) > 0)
- return -EBUSY;
+ if (atomic_read(&ring->inflight) > 0) {
+ busy = true;
+ continue;
+ }
if (ring->irq) {
unbind_from_irqhandler(ring->irq, ring);
@@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
ring->active = false;
}
+ if (busy)
+ return -EBUSY;
+
blkif->nr_ring_pages = 0;
/*
* blkif->rings was allocated in connect_ring, so we should free it in
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 98e34e4c62b8..2468c28d4771 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2075,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev)
/*
* Get the bios in the request so we can re-queue them.
*/
- if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
- req_op(shadow[i].request) == REQ_OP_DISCARD ||
- req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+ req_op(shadow[j].request) == REQ_OP_DISCARD ||
+ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
shadow[j].request->cmd_flags & REQ_FUA) {
/*
* Flush operations don't contain bios, so
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 856d5dc02451..3b1b6340ba13 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
- char compressor[CRYPTO_MAX_ALG_NAME];
+ char compressor[ARRAY_SIZE(zram->compressor)];
size_t sz;
strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
return -EBUSY;
}
- strlcpy(zram->compressor, compressor, sizeof(compressor));
+ strcpy(zram->compressor, compressor);
up_write(&zram->init_lock);
return len;
}
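The zram hunk sizes the temporary buffer with ARRAY_SIZE() of the destination field, so the final plain strcpy() cannot overflow even if the field's size and CRYPTO_MAX_ALG_NAME ever diverge. A user-space sketch of that sizing idiom (structure and names invented for illustration; snprintf stands in for strlcpy, which glibc lacks):

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct dev { char compressor[64]; };

    static void set_compressor(struct dev *d, const char *buf)
    {
        /* Same capacity as the destination, so strcpy() below is safe. */
        char tmp[ARRAY_SIZE(d->compressor)];

        snprintf(tmp, sizeof(tmp), "%s", buf);

        /* ... validation of tmp would happen here ... */

        strcpy(d->compressor, tmp);
    }

    int main(void)
    {
        struct dev d;

        set_compressor(&d, "lzo");
        printf("%s\n", d.compressor);
        return 0;
    }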
diff --git a/drivers/char/random.c b/drivers/char/random.c
index afa3ce7d3e72..8ad92707e45f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
print_once = true;
#endif
- pr_notice("random: %s called from %pF with crng_init=%d\n",
+ pr_notice("random: %s called from %pS with crng_init=%d\n",
func_name, caller, crng_init);
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index fcae5ca6ac92..54a67f8a28eb 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -262,7 +262,7 @@ config CLKSRC_LPC32XX
config CLKSRC_PISTACHIO
bool "Clocksource for Pistachio SoC" if COMPILE_TEST
- depends on HAS_IOMEM
+ depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
select TIMER_OF
help
Enables the clocksource for the Pistachio SoC.
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index aae87c4c546e..72bbfccef113 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1440,7 +1440,7 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count)
* While unlikely, it's theoretically possible that none of the frames
* in a timer expose the combination of feature we want.
*/
- for (i = i; i < timer_count; i++) {
+ for (i = 0; i < timer_count; i++) {
timer = &timers[i];
frame = arch_timer_mem_find_best_frame(timer);
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index bc48cbf6a795..269db74a0658 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -305,7 +305,7 @@ static int em_sti_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get irq\n");
- return -EINVAL;
+ return irq;
}
/* map memory, let base point to the STI instance */
@@ -314,11 +314,12 @@ static int em_sti_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
- dev_name(&pdev->dev), p)) {
+ ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&pdev->dev), p);
+ if (ret) {
dev_err(&pdev->dev, "failed to request low IRQ\n");
- return -ENOENT;
+ return ret;
}
/* get hold of clock */
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index d509b500a7b5..4d7aef9d9c15 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -128,9 +128,9 @@ static __init int timer_base_init(struct device_node *np,
const char *name = of_base->name ? of_base->name : np->full_name;
of_base->base = of_io_request_and_map(np, of_base->index, name);
- if (!of_base->base) {
+ if (IS_ERR(of_base->base)) {
pr_err("Failed to iomap (%s)\n", name);
- return -ENXIO;
+ return PTR_ERR(of_base->base);
}
return 0;
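The timer-of fix matters because of_io_request_and_map() reports failure with an ERR_PTR-encoded pointer rather than NULL, so the old `!of_base->base` test could never fire. A user-space model of the kernel's error-pointer convention (simplified; the real macros live in include/linux/err.h):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        /* Error values live in the top 4095 bytes of the address space. */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *fake_iomap(int fail)
    {
        return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000UL;
    }

    int main(void)
    {
        void *base = fake_iomap(1);

        if (!base)                      /* never true for ERR_PTR values */
            printf("NULL check fired\n");
        if (IS_ERR(base))               /* this is the check that works */
            printf("IS_ERR fired, error %ld\n", PTR_ERR(base));
        return 0;
    }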
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0566455f233e..65ee4fcace1f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1613,8 +1613,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
- return mul_ext_fp(cpu->sample.core_avg_perf,
- cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+ return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
}
static inline int32_t get_avg_pstate(struct cpudata *cpu)
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 37b0698b7193..42896a67aeae 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
return -1;
}
+extern u32 pnv_get_supported_cpuidle_states(void);
static int powernv_add_idle_states(void)
{
struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
const char *names[CPUIDLE_STATE_MAX];
u32 has_stop_states = 0;
int i, rc;
+ u32 supported_flags = pnv_get_supported_cpuidle_states();
+
/* Currently we have snooze statically defined */
@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
for (i = 0; i < dt_idle_states; i++) {
unsigned int exit_latency, target_residency;
bool stops_timebase = false;
+
+ /*
+ * Skip platform idle states whose flags are not present in
+ * the supported_cpuidle_states mask.
+ */
+ if ((flags[i] & supported_flags) != flags[i])
+ continue;
/*
* If an idle state has exit latency beyond
* POWERNV_THRESHOLD_LATENCY_NS then don't use it
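The new filter in cpuidle-powernv keeps a device-tree idle state only if every flag it requests appears in the mask the platform reports as supported; `(flags & supported) != flags` is the usual "not a subset" test. A small stand-alone illustration (flag values made up):

    #include <stdio.h>

    #define FLAG_SNOOZE 0x1
    #define FLAG_NAP    0x2
    #define FLAG_STOP   0x4

    static int is_supported(unsigned int flags, unsigned int supported)
    {
        /* True only when every bit in flags is also set in supported. */
        return (flags & supported) == flags;
    }

    int main(void)
    {
        unsigned int supported = FLAG_SNOOZE | FLAG_NAP;

        printf("nap:  %d\n", is_supported(FLAG_NAP, supported));  /* 1 */
        printf("stop: %d\n", is_supported(FLAG_STOP, supported)); /* 0 */
        return 0;
    }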
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 8527a5899a2f..3f819399cd95 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
- memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
- for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
ctx->base.needs_inv = true;
@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
}
}
+ memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+ memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+
return 0;
}
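The safexcel reordering exists so the comparison uses the previously cached ipad/opad: the old state has to be compared against the freshly derived one before it is overwritten, otherwise needs_inv can never be set. A user-space model of the compare-before-update pattern (toy state, not the driver's structures):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define WORDS 5

    struct ctx { unsigned int pad[WORDS]; bool needs_inv; };

    static void set_key(struct ctx *c, const unsigned int new_state[WORDS])
    {
        /* Compare against the cached state *before* overwriting it. */
        for (int i = 0; i < WORDS; i++) {
            if (c->pad[i] != new_state[i]) {
                c->needs_inv = true;
                break;
            }
        }

        memcpy(c->pad, new_state, sizeof(c->pad));
    }

    int main(void)
    {
        struct ctx c = { { 1, 2, 3, 4, 5 }, false };
        unsigned int fresh[WORDS] = { 1, 2, 9, 4, 5 };

        set_key(&c, fresh);
        printf("needs_inv = %d\n", c.needs_inv);    /* 1 */
        return 0;
    }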
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 427cbe012729..dadc4a808df5 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
- goto free_buf_src;
+ goto free_buf_dst;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
@@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
-free_buf_src:
- free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dst:
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
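The ixp4xx fix applies the usual goto-unwind rule: error labels release resources in the reverse order of acquisition, so a failure after allocating the dst chain jumps to the label that frees dst and then falls through to the one that frees src. A user-space sketch of the idiom (toy allocations, invented names):

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(int fail_late)
    {
        void *src, *dst, *extra;

        src = malloc(32);
        if (!src)
            return -1;

        dst = malloc(32);
        if (!dst)
            goto free_src;

        extra = fail_late ? NULL : malloc(32);
        if (!extra)
            goto free_dst;      /* dst exists, so free it first */

        free(extra);
        free(dst);
        free(src);
        return 0;

    free_dst:
        free(dst);
    free_src:                   /* fall through: src is freed after dst */
        free(src);
        return -1;
    }

    int main(void)
    {
        printf("setup(1) = %d\n", setup(1));
        return 0;
    }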
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d7e219d2669d..66fb40d0ebdb 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
{
struct sync_file *sync_file = file->private_data;
- if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+ if (test_bit(POLL_ENABLED, &sync_file->flags))
dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
dma_fence_put(sync_file->fence);
kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
poll_wait(file, &sync_file->wq, wait);
- if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+ if (list_empty(&sync_file->cb.node) &&
+ !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b10cbaa82ff5..b26256f23d67 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
tdc->irq = of_irq_get(pdev->dev.of_node, i);
- if (tdc->irq < 0) {
- ret = tdc->irq;
+ if (tdc->irq <= 0) {
+ ret = tdc->irq ?: -ENXIO;
goto irq_dispose;
}
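The tegra210-adma change accounts for the IRQ lookup being able to return 0 ("no mapping") as well as a negative errno, and 0 must not be propagated as success. A tiny stand-alone model of normalising that return convention (return values hard-coded for the example):

    #include <stdio.h>

    #define ENXIO 6

    /* Pretend lookup: <0 is an errno, 0 means "no interrupt mapped". */
    static int fake_irq_get(int which)
    {
        if (which == 0)
            return 42;          /* valid IRQ number */
        if (which == 1)
            return 0;           /* no mapping found */
        return -517;            /* e.g. -EPROBE_DEFER */
    }

    static int probe_one(int which)
    {
        int irq = fake_irq_get(which);

        if (irq <= 0)
            return irq ? irq : -ENXIO;  /* turn 0 into a real error */
        return 0;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("probe_one(%d) = %d\n", i, probe_one(i));
        return 0;
    }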
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index c473f4c5ca34..9f6bcf173b0e 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -18,8 +18,8 @@
#define pr_fmt(fmt) "apple-properties: " fmt
#include <linux/bootmem.h>
-#include <linux/dmi.h>
#include <linux/efi.h>
+#include <linux/platform_data/x86/apple.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
@@ -191,8 +191,7 @@ static int __init map_properties(void)
u64 pa_data;
int ret;
- if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.") &&
- !dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc."))
+ if (!x86_apple_machine)
return 0;
pa_data = boot_params.hdr.setup_data;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index e338c3743562..45c65f805fd6 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
edge_cause = mvebu_gpio_read_edge_cause(mvchip);
edge_mask = mvebu_gpio_read_edge_mask(mvchip);
- cause = (data_in ^ level_mask) | (edge_cause & edge_mask);
+ cause = (data_in & level_mask) | (edge_cause & edge_mask);
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 16fe9742597b..fc80add5fedb 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -2,6 +2,7 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
@@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = {
};
ATTRIBUTE_GROUPS(gpiochip);
+static struct gpio_desc *gpio_to_valid_desc(int gpio)
+{
+ return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL;
+}
+
/*
* /sys/class/gpio/export ... write-only
* integer N ... number of GPIO to export (full access)
@@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class,
if (status < 0)
goto done;
- desc = gpio_to_desc(gpio);
+ desc = gpio_to_valid_desc(gpio);
/* reject invalid GPIOs */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class,
if (status < 0)
goto done;
- desc = gpio_to_desc(gpio);
+ desc = gpio_to_valid_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 6558a3ed57a7..e1cde6b80027 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -147,36 +147,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
}
/**
- * amdgpu_mn_invalidate_page - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @address: address of invalidate page
- *
- * Invalidation of a single page. Blocks for all BOs mapping it
- * and unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- struct interval_tree_node *it;
-
- mutex_lock(&rmn->lock);
-
- it = interval_tree_iter_first(&rmn->objects, address, address);
- if (it) {
- struct amdgpu_mn_node *node;
-
- node = container_of(it, struct amdgpu_mn_node, it);
- amdgpu_mn_invalidate_node(node, address, address);
- }
-
- mutex_unlock(&rmn->lock);
-}
-
-/**
* amdgpu_mn_invalidate_range_start - callback to notify about mm change
*
* @mn: our notifier
@@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
- .invalidate_page = amdgpu_mn_invalidate_page,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6899180b265..c586f44312f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ if (dma_fence_is_signaled(f)) {
+ hash_del(&e->node);
+ dma_fence_put(f);
+ kmem_cache_free(amdgpu_sync_slab, e);
+ continue;
+ }
if (ring && s_fence) {
/* For fences from the same ring it is sufficient
* when they are scheduled.
@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
}
}
- if (dma_fence_is_signaled(f)) {
- hash_del(&e->node);
- dma_fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
return f;
}
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 2d51a2269fc6..5131bfb94f06 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -597,9 +597,9 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
struct sii8620_mt_msg *msg)
{
- u8 reg = msg->reg[0] & 0x7f;
+ u8 reg = msg->reg[1] & 0x7f;
- if (msg->reg[0] & 0x80)
+ if (msg->reg[1] & 0x80)
ctx->xdevcap[reg] = msg->ret;
else
ctx->devcap[reg] = msg->ret;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 5c26488e7a2d..0529e500c534 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* port@2 is the output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
/* Shut down GPIO is optional */
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c0f336d23f9c..aed25c4183bb 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1655,6 +1655,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state);
+ if (ret)
+ return ret;
+
if (!state->allow_modeset) {
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1665,7 +1668,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -2167,10 +2170,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
- struct drm_out_fence_state *fence_state = NULL;
+ struct drm_out_fence_state *fence_state;
unsigned plane_mask;
int ret = 0;
- unsigned int i, j, num_fences = 0;
+ unsigned int i, j, num_fences;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -2211,6 +2214,8 @@ retry:
plane_mask = 0;
copied_objs = 0;
copied_props = 0;
+ fence_state = NULL;
+ num_fences = 0;
for (i = 0; i < arg->count_objs; i++) {
uint32_t obj_id, count_props;
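Moving the fence_state/num_fences initialisation inside the retry path matters because the ioctl can loop back to retry: after a deadlock backoff, and the second pass has to start from a clean slate instead of reusing (and later double-freeing) state from the first attempt. A user-space sketch of resetting per-attempt state at the top of a retry loop (toy resources, not the DRM code):

    #include <stdio.h>
    #include <stdlib.h>

    static int do_attempt(int attempt, int **state, int *count)
    {
        if (attempt == 0)
            return -11;         /* pretend -EAGAIN before allocating */

        *state = calloc(4, sizeof(int));
        *count = 4;
        return *state ? 0 : -12;
    }

    int main(void)
    {
        int *state;
        int count;
        int attempt = 0;
        int ret;

    retry:
        state = NULL;           /* reset per-attempt state every pass */
        count = 0;

        ret = do_attempt(attempt, &state, &count);
        if (ret == -11) {
            free(state);        /* safe only because of the reset above */
            attempt++;
            goto retry;
        }

        printf("done after %d retries, count=%d\n", attempt, count);
        free(state);
        return 0;
    }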
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8dc11064253d..cdaac37907b1 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
+
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
- if (dev->driver->gem_close_object)
- dev->driver->gem_close_object(obj, file_priv);
-
drm_gem_object_handle_put_unlocked(obj);
return 0;
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 5dc8c4350602..e40c12fabbde 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
crtc = drm_crtc_find(dev, plane_req->crtc_id);
if (!crtc) {
+ drm_framebuffer_put(fb);
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 5bd93169dac2..6463fc2c736f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
if (ret)
return ret;
- if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
- DRM_ERROR("relocation %u outside object", i);
+ if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+ DRM_ERROR("relocation %u outside object\n", i);
return -EINVAL;
}
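The etnaviv check is an off-by-one: for a read of sizeof(*ptr) bytes inside an object of `size` bytes, the largest valid offset is exactly `size - sizeof(*ptr)`, so only offsets strictly greater than that are out of range. A short stand-alone check of that boundary (sizes invented; assumes item_size <= obj_size):

    #include <stdint.h>
    #include <stdio.h>

    static int reloc_in_bounds(size_t offset, size_t obj_size, size_t item_size)
    {
        /* Reject only when the item would run past the end of the object. */
        return offset <= obj_size - item_size;
    }

    int main(void)
    {
        size_t obj = 4096, item = sizeof(uint64_t);

        printf("offset 4088: %d\n", reloc_in_bounds(4088, obj, item)); /* 1: last valid */
        printf("offset 4089: %d\n", reloc_in_bounds(4089, obj, item)); /* 0 */
        return 0;
    }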
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d48fd7c918f8..73217c281c9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_gem_object *obj;
struct drm_framebuffer *fb;
int i;
int ret;
- for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int height = (i == 0) ? mode_cmd->height :
+ DIV_ROUND_UP(mode_cmd->height, info->vsub);
+ unsigned long size = height * mode_cmd->pitches[i] +
+ mode_cmd->offsets[i];
+
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
exynos_gem[i] = to_exynos_gem(obj);
+
+ if (size > exynos_gem[i]->size) {
+ i++;
+ ret = -EINVAL;
+ goto err;
+ }
}
fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 713848c36349..e556a46cd4c2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unmap_src:
i915_gem_object_unpin_map(obj);
put_obj:
- i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 700050556242..1648887d3f55 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,6 +46,8 @@
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_execlist *execlist =
- &vgpu->execlist[workload->ring_id];
+ int ring_id = workload->ring_id;
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, ring_id)->next;
bool lite_restore = false;
int ret;
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
- if (workload->status || vgpu->resetting)
+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ /* If workload->status is not successful, the HW GPU has hit a
+ * hang or something has gone wrong in i915/GVT, and GVT won't
+ * inject a context switch interrupt to the guest. To the guest
+ * this error is effectively a vGPU hang, so we should emulate
+ * one: if there are pending workloads already submitted from
+ * the guest, clean them up just as the HW GPU would.
+ *
+ * If we are in the middle of an engine reset, the pending
+ * workloads won't be submitted to the HW GPU and will be
+ * cleaned up later during the reset, so doing the workload
+ * cleanup here has no impact.
+ */
+ clean_workloads(vgpu, ENGINE_MASK(ring_id));
goto out;
+ }
- if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 5dad9298b2d5..a26c1705430e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ int num = gvt->mmio.num_mmio_block;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
- int i;
+ int i, j;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
+ for (i = 0; i < num; i++, block++) {
+ for (j = 0; j < block->size; j += 4)
+ *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+ I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+ block->offset) + j));
+ }
+
memcpy(gvt->firmware.mmio, p, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f..2964a4d01a66 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -149,7 +149,7 @@ struct intel_vgpu {
bool active;
bool pv_notified;
bool failsafe;
- bool resetting;
+ unsigned int resetting_eng;
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
unsigned long vgpu_allocated_fence_num;
};
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+ unsigned int device;
+ i915_reg_t offset;
+ unsigned int size;
+ gvt_mmio_func read;
+ gvt_mmio_func write;
+};
+
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
+ struct gvt_mmio_block *mmio_block;
+ unsigned int num_mmio_block;
+
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
unsigned int num_tracked_mmio;
};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 17febe830ff6..feed9921b3b3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
return 0;
}
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
- unsigned int device;
- i915_reg_t offset;
- unsigned int size;
- gvt_mmio_func read;
- gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
- {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
- {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
- pvinfo_mmio_read, pvinfo_mmio_write},
- {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
unsigned int offset)
{
unsigned long device = intel_gvt_get_device_type(gvt);
- struct gvt_mmio_block *block = gvt_mmio_blocks;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ int num = gvt->mmio.num_mmio_block;
int i;
- for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+ for (i = 0; i < num; i++, block++) {
if (!(device & block->device))
continue;
if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_attribute = NULL;
}
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+ {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+ {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+ {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+ pvinfo_mmio_read, pvinfo_mmio_write},
+ {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+ {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+ {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
/**
* intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
* @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
}
+ gvt->mmio.mmio_block = mmio_blocks;
+ gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
gvt_dbg_mmio("traced %u virtual mmio registers\n",
gvt->mmio.num_tracked_mmio);
return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
gvt_mmio_func func;
int ret;
- if (WARN_ON(bytes > 4))
+ if (WARN_ON(bytes > 8))
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4f7057d62d88..22e08eb2d0b7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
i915_gem_request_put(fetch_and_zero(&workload->req));
- if (!workload->status && !vgpu->resetting) {
+ if (!workload->status && !(vgpu->resetting_eng &
+ ENGINE_MASK(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 90c14e6e3ea0..3deadcbd5a24 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
vgpu->id, dmlr, engine_mask);
- vgpu->resetting = true;
+
+ vgpu->resetting_eng = resetting_eng;
intel_vgpu_stop_schedule(vgpu);
/*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
mutex_lock(&gvt->lock);
}
- intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+ intel_vgpu_reset_execlist(vgpu, resetting_eng);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
}
}
- vgpu->resetting = false;
+ vgpu->resetting_eng = 0;
gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
gvt_dbg_core("------------------------------------------\n");
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 00d8967c8512..d1bd53b73738 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4580,7 +4580,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
- if (IS_GEN9_BC(dev_priv))
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
sseu->subslice_mask =
INTEL_INFO(dev_priv)->sseu.subslice_mask;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 39ed58a21fc1..e1e971ee2ed5 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -688,19 +688,19 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
}
static bool
-needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *engine,
- struct i915_gem_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
{
+ struct i915_gem_context *from = engine->legacy_active_context;
+
if (!ppgtt)
return false;
/* Always load the ppgtt on first use */
- if (!engine->legacy_active_context)
+ if (!from)
return true;
/* Same context without new entries, skip */
- if (engine->legacy_active_context == to &&
+ if ((!from->ppgtt || from->ppgtt == ppgtt) &&
!(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
@@ -744,7 +744,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
- if (needs_pd_load_pre(ppgtt, engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
@@ -841,7 +841,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
struct i915_hw_ppgtt *ppgtt =
to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- if (needs_pd_load_pre(ppgtt, engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine)) {
int ret;
trace_switch_mm(engine, to);
@@ -852,6 +852,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
+ engine->legacy_active_context = to;
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 7032c542a9b1..4dd4c2159a92 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
goto err_unpin;
}
+ ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
+ if (ret)
+ goto err_unpin;
+
ret = req->engine->emit_bb_start(req,
so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1032f98add11..77fb39808131 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
return true;
case MUTEX_TRYLOCK_FAILED:
+ *unlock = false;
+ preempt_disable();
do {
cpu_relax();
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
- case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
- return true;
+ break;
}
} while (!need_resched());
+ preempt_enable();
+ return *unlock;
- return false;
+ case MUTEX_TRYLOCK_SUCCESS:
+ *unlock = true;
+ return true;
}
BUG();
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9cd22f83b0cf..f33d90226704 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
u32 *cs;
int i;
- cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+ cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+ *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 639d45c1dd2e..7ea7fd1e8856 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
uint8_t aux_channel, ddc_pin;
/* Each DDI port can have more than one value on the "DVO Port" field,
- * so look for all the possible values for each port and abort if more
- * than one is found. */
+ * so look for all the possible values for each port.
+ */
int dvo_ports[][3] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
@@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
};
- /* Find the child device to use, abort if more than one found. */
+ /*
+ * Find the first child device to reference the port, report if more
+ * than one found.
+ */
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i;
@@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (it->common.dvo_port == dvo_ports[port][j]) {
if (child) {
- DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+ DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
- return;
+ } else {
+ child = it;
}
- child = it;
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 306c6b06b330..17c4ae7e4e7c 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
}
/* Program the max register to clamp values > 1.0. */
+ i = lut_size - 1;
I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
drm_color_lut_extract(lut[i].red, 16));
I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 9edeaaef77ad..d3b3252a8742 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1762,7 +1762,7 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
- return cnl_ddi_translations_dp_0_85V;
+ return cnl_ddi_translations_edp_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
return cnl_ddi_translations_edp_0_95V;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9471c88d449e..cc484b56eeaa 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3485,6 +3485,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
!gpu_reset_clobbers_display(dev_priv))
return;
+ /* We have a modeset vs reset deadlock, defensively unbreak it.
+ *
+ * FIXME: We can do a _lot_ better, this is just a first iteration.
+ */
+ i915_gem_set_wedged(dev_priv);
+ DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n");
+
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
index 6e09ceb71500..150a156f3b1e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct mipi_dsi_device *dsi_device;
- u8 data;
+ u8 data = 0;
enum port port;
/* FIXME: Need to take care of 16 bit brightness level */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 7158c7ce9c09..91c07b0c8db9 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
if (!gpio_desc) {
gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
- "panel", gpio_index,
+ NULL, gpio_index,
value ? GPIOD_OUT_LOW :
GPIOD_OUT_HIGH);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7404cf2aac28..2afa4daa88e8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
+static u8 gtiir[] = {
+ [RCS] = 0,
+ [BCS] = 0,
+ [VCS] = 1,
+ [VCS2] = 1,
+ [VECS] = 3,
+};
+
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
- /* After a GPU reset, we may have requests to replay */
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+ /*
+ * Clear any pending interrupt state.
+ *
+ * We do it twice out of paranoia that some of the IIR are double
+ * buffered, and if we only reset it once there may still be
+ * an interrupt pending.
+ */
+ I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+ GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
+ I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+ GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ /* After a GPU reset, we may have requests to replay */
submit = false;
for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
if (!port_isset(&port[n]))
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 52b3a1fd4059..57ef5833c427 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -63,7 +63,6 @@ enum {
};
/* Logical Rings */
-void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int logical_render_ring_init(struct intel_engine_cs *engine);
int logical_xcs_ring_init(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 5abef482eacf..beb9baaf2f2e 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!IS_GEN9(dev_priv)) {
- DRM_ERROR("LSPCON is supported on GEN9 only\n");
+ if (!HAS_LSPCON(dev_priv)) {
+ DRM_ERROR("LSPCON is not supported on this platform\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c2cbd81869..593349be8b9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
if (i915.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- return panel->backlight.max - val;
+ return panel->backlight.max - val + panel->backlight.min;
}
return val;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 6276bb834b4f..d3845989a29d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -545,15 +545,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
return;
}
+ ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
- ipu_dp_setup_channel(ipu_plane->dp,
- IPUV3_COLORSPACE_RGB,
- IPUV3_COLORSPACE_RGB);
+ ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
- ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
ipu_dp_setup_channel(ipu_plane->dp, ics,
IPUV3_COLORSPACE_UNKNOWN);
/* Enable local alpha on partial plane */
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b638d192ce5e..99d39b2aefa6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,7 +5,7 @@ config DRM_MSM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
- select QCOM_MDT_LOADER
+ select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b4b54f1c24bc..f9eae03aa1dc 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,7 +15,7 @@
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
const struct firmware *fw;
+ struct device_node *np;
+ struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
void *mem_region = NULL;
int ret;
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+ return -EINVAL;
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np)
+ return -ENODEV;
+
+ np = of_parse_phandle(np, "memory-region", 0);
+ if (!np)
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+ mem_size = resource_size(&r);
+
/* Request the MDT file for the firmware */
ret = request_firmware(&fw, fwname, dev);
if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
}
/* Allocate memory for the firmware image */
- mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
if (!mem_region) {
ret = -ENOMEM;
goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
out:
+ if (mem_region)
+ memunmap(mem_region);
+
release_firmware(fw);
return ret;
}
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
- return -ENODEV;
-}
-#endif
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
gpu->funcs->flush(gpu);
}
-struct a5xx_hwcg {
+static const struct {
u32 offset;
u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};
-static const struct {
- int (*test)(struct adreno_gpu *gpu);
- const struct a5xx_hwcg *regs;
- unsigned int count;
-} a5xx_hwcg_regs[] = {
- { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
- const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
unsigned int i;
- for (i = 0; i < count; i++)
- gpu_write(gpu, regs[i].offset, regs[i].value);
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+ gpu_write(gpu, a5xx_hwcg[i].offset,
+ state ? a5xx_hwcg[i].value : 0);
- gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
- gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
- if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
- _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
- a5xx_hwcg_regs[i].count);
- return;
- }
- }
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
return ret;
}
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
- struct device_node *node;
- int ret;
-
- if (dev->parent)
- return 0;
-
- /* Find the sub-node for the zap shader */
- node = of_get_child_by_name(parent->of_node, "zap-shader");
- if (!node) {
- DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
- return -ENODEV;
- }
-
- dev->parent = parent;
- dev->of_node = node;
- dev_set_name(dev, "adreno_zap_shader");
-
- ret = device_register(dev);
- if (ret) {
- DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
- goto out;
- }
-
- ret = of_reserved_mem_device_init(dev);
- if (ret) {
- DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
- device_unregister(dev);
- }
-
-out:
- if (ret)
- dev->parent = NULL;
-
- return ret;
-}
-
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
return -ENODEV;
}
- ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
- if (!ret)
- ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
- adreno_gpu->info->zapfw);
+ ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
loaded = !ret;
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
/* Enable HWCG */
- a5xx_enable_hwcg(gpu);
+ a5xx_set_hwcg(gpu, true);
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- if (a5xx_gpu->zap_dev.parent)
- device_unregister(&a5xx_gpu->zap_dev);
-
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
- 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
- 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
- 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
- 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
- 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
- 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
- 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
- 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
- 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
- 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
- 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
- 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
- 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
- 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
- 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
- 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
- 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
- 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
- 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
- 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
- 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
- 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
- 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
- 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
- ~0
+ 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+ 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+ 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+ 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+ 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+ 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+ 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+ 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+ 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+ 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+ 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+ 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+ 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+ 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+ 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+ 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+ 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+ 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+ 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+ 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+ 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+ 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+ 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+ 0xB9A0, 0xB9BF, ~0
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+ /*
+ * Temporarily disable hardware clock gating before going into
+ * adreno_show to avoid issues while reading the registers
+ */
+ a5xx_set_hwcg(gpu, false);
adreno_show(gpu, m);
+ a5xx_set_hwcg(gpu, true);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6638bc85645d..1137092241d5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -36,8 +36,6 @@ struct a5xx_gpu {
uint32_t gpmu_dwords;
uint32_t lm_leakage;
-
- struct device zap_dev;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
}
bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1ab2703674a..7414c6bbd582 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->base.fast_rate;
return 0;
case MSM_PARAM_TIMESTAMP:
- if (adreno_gpu->funcs->get_timestamp)
- return adreno_gpu->funcs->get_timestamp(gpu, value);
+ if (adreno_gpu->funcs->get_timestamp) {
+ int ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+ return ret;
+ }
return -EINVAL;
default:
DBG("%s: invalid param: %u", gpu->name, param);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9e9c5696bc03..c7b612c3d771 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ ret = dsi_calc_clk_rate(msm_host);
+ if (ret) {
+ pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+ return;
+ }
clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- int ret;
if (msm_host->mode) {
drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
return -ENOMEM;
}
- ret = dsi_calc_clk_rate(msm_host);
- if (ret) {
- pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index cb5415d6c04b..735a87a699fa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
- enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
- enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+ enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
int i, plane_cnt = 0;
bool bg_alpha_enabled = false;
u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
+ mdp5_enable(mdp5_kms);
goto set_cursor;
}
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
get_roi(crtc, &roi_w, &roi_h);
+ mdp5_enable(mdp5_kms);
+
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ set_cursor:
crtc_flush(crtc, flush_mask);
end:
+ mdp5_disable(mdp5_kms);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
get_roi(crtc, &roi_w, &roi_h);
+ mdp5_enable(mdp5_kms);
+
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
crtc_flush(crtc, flush_mask);
+ mdp5_disable(mdp5_kms);
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 97f3294fbfc6..70bef51245af 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
- mdp5_cmd_encoder_disable(encoder);
+ mdp5_cmd_encoder_enable(encoder);
else
mdp5_vid_encoder_enable(encoder);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5d13fa5381ee..1c603aef3c59 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name, bool mandatory)
{
struct device *dev = &pdev->dev;
- struct clk *clk = devm_clk_get(dev, name);
+ struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
}
/* mandatory clocks: */
- ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
if (ret)
goto fail;
/* optional clocks: */
- get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index fe3a4de1a433..61f39c86dd09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct mdp5_hw_pipe *right_hwpipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
- struct phase_step step = { 0 };
- struct pixel_ext pe = { 0 };
+ struct phase_step step = { { 0 } };
+ struct pixel_ext pe = { { 0 } };
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 65f35544c1ec..a0c60e738db8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
struct page **pages;
vma = add_vma(obj, aspace);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto unlock;
+ }
pages = get_pages(obj);
if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
fail:
del_vma(vma);
-
+unlock:
mutex_unlock(&msm_obj->lock);
return ret;
}
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ mutex_lock(&msm_obj->lock);
vma = add_vma(obj, NULL);
+ mutex_unlock(&msm_obj->lock);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6bfca7470141..8a75c0bd8a78 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
- (nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+ if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
goto out;
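For reference on the (u64) casts in the submit_create() hunk above: on 32-bit builds size_t is only 32 bits wide, so without the casts the multiplications can wrap before the SIZE_MAX comparison ever runs. A minimal stand-alone sketch of the difference, using a hypothetical 16-byte entry and count rather than the driver's own types:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 16-byte per-BO entry, standing in for submit->bos[0]. */
struct bo_entry { uint64_t iova; uint32_t flags; uint32_t pad; };

int main(void)
{
	uint32_t nr_bos = 0x10000001u;	/* userspace-controlled count */

	/* Emulate a 32-bit size_t: the multiply wraps and looks tiny. */
	uint32_t wrapped = nr_bos * (uint32_t)sizeof(struct bo_entry);

	/* Widen one operand first: the real size survives the multiply,
	 * so a later overflow check can actually reject it. */
	uint64_t widened = (uint64_t)nr_bos * sizeof(struct bo_entry);

	printf("wrapped=%u bytes, widened=%llu bytes\n",
	       wrapped, (unsigned long long)widened);
	return 0;
}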
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index c36321bc8714..d34e331554f3 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -42,7 +42,7 @@ void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt)
{
- if (!vma->iova)
+ if (!aspace || !vma->iova)
return;
if (aspace->mmu) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index c7c84d34d97e..88582af8bd89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
/* Create output path objects for each VBIOS display path. */
i = -1;
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+ if (ver < 0x40) /* No support for chipsets prior to NV50. */
+ break;
if (dcbE.type == DCB_OUTPUT_UNUSED)
continue;
if (dcbE.type == DCB_OUTPUT_EOL)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index c6b1b7f3a2a3..c16bc0a7115b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -275,11 +275,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm)
static int rockchip_drm_sys_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct rockchip_drm_private *priv = drm->dev_private;
+ struct rockchip_drm_private *priv;
+
+ if (!drm)
+ return 0;
drm_kms_helper_poll_disable(drm);
rockchip_drm_fb_suspend(drm);
+ priv = drm->dev_private;
priv->state = drm_atomic_helper_suspend(drm);
if (IS_ERR(priv->state)) {
rockchip_drm_fb_resume(drm);
@@ -293,8 +297,12 @@ static int rockchip_drm_sys_suspend(struct device *dev)
static int rockchip_drm_sys_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct rockchip_drm_private *priv = drm->dev_private;
+ struct rockchip_drm_private *priv;
+
+ if (!drm)
+ return 0;
+ priv = drm->dev_private;
drm_atomic_helper_resume(drm, priv->state);
rockchip_drm_fb_resume(drm);
drm_kms_helper_poll_enable(drm);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d450332c2fd..2900f1410d95 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
- int ret;
+ int ret, i;
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
}
memcpy(vop->regs, vop->regsbak, vop->len);
+ /*
+ * We need to make sure that all windows are disabled before we
+ * enable the crtc. Otherwise we might try to scan from a destroyed
+ * buffer later.
+ */
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
+
+ spin_lock(&vop->reg_lock);
+ VOP_WIN_SET(vop, win, enable, 0);
+ spin_unlock(&vop->reg_lock);
+ }
+
vop_cfg_done(vop);
/*
@@ -566,28 +580,11 @@ err_put_pm_runtime:
static void vop_crtc_disable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
- int i;
WARN_ON(vop->event);
rockchip_drm_psr_deactivate(&vop->crtc);
- /*
- * We need to make sure that all windows are disabled before we
- * disable that crtc. Otherwise we might try to scan from a destroyed
- * buffer later.
- */
- for (i = 0; i < vop->data->win_size; i++) {
- struct vop_win *vop_win = &vop->win[i];
- const struct vop_win_data *win = vop_win->data;
-
- spin_lock(&vop->reg_lock);
- VOP_WIN_SET(vop, win, enable, 0);
- spin_unlock(&vop->reg_lock);
- }
-
- vop_cfg_done(vop);
-
drm_crtc_vblank_off(crtc);
/*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd when do clip, but yuv plane start point
* need align with 2 pixel.
*/
- if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+ if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+ DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
+ }
return 0;
}
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, format, format);
- VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+ VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
if (is_yuv_support(fb->format->format)) {
int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
- VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+ VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
VOP_WIN_SET(vop, win, uv_mst, dma_addr);
}
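On the pitch programming above: fb->pitches[] >> 2 silently truncates when a pitch is not a multiple of 4 bytes, while DIV_ROUND_UP rounds the virtual width up so the last partial word is still covered. A small illustrative sketch with made-up values, not tied to the VOP registers:

#include <stdio.h>

/* Same definition as the kernel's DIV_ROUND_UP() helper. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pitch = 1922;	/* bytes per line, not 4-byte aligned */

	/* Shifting truncates: 1922 >> 2 == 480, dropping the last 2 bytes. */
	printf("shift   : %u words\n", pitch >> 2);

	/* Rounding up keeps the whole line: DIV_ROUND_UP(1922, 4) == 481. */
	printf("round up: %u words\n", DIV_ROUND_UP(pitch, 4));
	return 0;
}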
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 9979fd0c2282..27eefbfcf3d0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
act_height = (src_h + vskiplines - 1) / vskiplines;
+ if (act_height == dst_h)
+ return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
return GET_SCL_FT_BILI_DN(act_height, dst_h);
}
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 2c4817fb0890..8fe5b184b4e8 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -7,7 +7,6 @@ config DRM_STM
select DRM_PANEL
select VIDEOMODE_HELPERS
select FB_PROVIDE_GET_FB_UNMAPPED_AREA
- default y
help
Enable support for the on-chip display controller on
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index abc7d8fe06b4..a45a627283a1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -25,12 +25,20 @@
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
+static void sun4i_drv_lastclose(struct drm_device *dev)
+{
+ struct sun4i_drv *drv = dev->dev_private;
+
+ drm_fbdev_cma_restore_mode(drv->fbdev);
+}
+
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */
+ .lastclose = sun4i_drv_lastclose,
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 61e06f0e8cd3..625ba24f143f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1567,10 +1567,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
}
+/**
+ * vmw_kms_atomic_commit - Perform an atomic state commit
+ *
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: Whether nonblocking behaviour is requested
+ *
+ * This is a simple wrapper around drm_atomic_helper_commit() for
+ * us to clear the nonblocking value.
+ *
+ * Nonblocking commits currently cause synchronization issues
+ * for vmwgfx.
+ *
+ * RETURNS
+ * Zero for success or negative error code on failure.
+ */
+int vmw_kms_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ return drm_atomic_helper_commit(dev, state, false);
+}
+
+
static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
.atomic_check = vmw_kms_atomic_check_modeset,
- .atomic_commit = drm_atomic_helper_commit,
+ .atomic_commit = vmw_kms_atomic_commit,
};
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index 08766c6e7856..87a20b3dcf7a 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,6 +1,7 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+ depends on DRM || !DRM # if DRM=m, this can't be 'y'
select GENERIC_IRQ_CHIP
help
Choose this if you have a i.MX5/6 system and want to use the Image
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 1006b230b236..65fa29591d21 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
- depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+ depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
select I2C_ALGOBIT
help
Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index f19348328a71..6fdf9231c23c 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
}
/* We are in an invalid state; reset bus to a known state. */
- if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) {
+ if (!bus->msgs) {
dev_err(bus->dev, "bus in unknown state");
bus->cmd_err = -EIO;
- aspeed_i2c_do_stop(bus);
+ if (bus->master_state != ASPEED_I2C_MASTER_STOP)
+ aspeed_i2c_do_stop(bus);
goto out_no_complete;
}
msg = &bus->msgs[bus->msgs_index];
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2ea6d0d25a01..2b98a173136f 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
- DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED |
- DW_IC_CON_SPEED_FAST;
+ DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
dev->mode = DW_IC_SLAVE;
@@ -257,7 +256,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
struct dw_i2c_dev *dev;
u32 acpi_speed, ht = 0;
struct resource *mem;
- int irq, ret;
+ int i, irq, ret;
+ const int supported_speeds[] = { 0, 100000, 400000, 1000000, 3400000 };
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -299,6 +299,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
/*
+ * Some DSDTs use a non-standard speed, round down to the lowest
+ * standard speed.
+ */
+ for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (acpi_speed < supported_speeds[i])
+ break;
+ }
+ acpi_speed = supported_speeds[i - 1];
+
+ /*
* Find bus speed from the "clock-frequency" device property, ACPI
* or by using fast mode if neither is set.
*/
@@ -319,7 +329,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (dev->clk_freq != 100000 && dev->clk_freq != 400000
&& dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
dev_err(&pdev->dev,
- "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+ "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
+ dev->clk_freq);
ret = -EINVAL;
goto exit_reset;
}
@@ -426,7 +437,7 @@ static void dw_i2c_plat_complete(struct device *dev)
#endif
#ifdef CONFIG_PM
-static int dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@@ -448,11 +459,21 @@ static int dw_i2c_plat_resume(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int dw_i2c_plat_suspend(struct device *dev)
+{
+ pm_runtime_resume(dev);
+ return dw_i2c_plat_runtime_suspend(dev);
+}
+#endif
+
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete,
SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
+ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+ dw_i2c_plat_resume,
+ NULL)
};
#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
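The rounding loop added to dw_i2c_plat_probe() above walks a sorted table and keeps the largest entry that does not exceed the firmware-provided value. A self-contained sketch of the same idea follows; the function name and the sample rates fed to it are invented for illustration, only the table mirrors the standard I2C bus speeds:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Round an arbitrary requested rate down to the nearest supported one.
 * The table must be sorted ascending and start with 0 as a catch-all. */
static unsigned int round_down_to_supported(unsigned int requested)
{
	static const unsigned int supported[] = { 0, 100000, 400000, 1000000, 3400000 };
	size_t i;

	for (i = 1; i < ARRAY_SIZE(supported); i++) {
		if (requested < supported[i])
			break;
	}
	return supported[i - 1];
}

int main(void)
{
	/* 333 kHz from a quirky DSDT becomes standard-mode 100 kHz. */
	printf("%u -> %u\n", 333000u, round_down_to_supported(333000u));
	printf("%u -> %u\n", 400000u, round_down_to_supported(400000u));
	return 0;
}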
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 0548c7ea578c..78d8fb73927d 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave)
return -EBUSY;
if (slave->flags & I2C_CLIENT_TEN)
return -EAFNOSUPPORT;
+ pm_runtime_get_sync(dev->dev);
+
/*
* Set slave address in the IC_SAR register,
* the address to which the DW_apb_i2c responds.
@@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
dev->disable_int(dev);
dev->disable(dev);
dev->slave = NULL;
+ pm_runtime_put(dev->dev);
return 0;
}
@@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
slave_activity = ((dw_readl(dev, DW_IC_STATUS) &
DW_IC_STATUS_SLAVE_ACTIVITY) >> 6);
- if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY))
+ if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
return 0;
dev_dbg(dev->dev,
@@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
ret = i2c_add_numbered_adapter(adap);
if (ret)
dev_err(dev->dev, "failure adding adapter: %d\n", ret);
- pm_runtime_put_noidle(dev->dev);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index e98e44e584a4..22ffcb73c185 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -341,8 +341,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
- data->block[0] = desc->rxbytes;
+ if (desc->rxbytes != dma_buffer[0] + 1)
+ return -EMSGSIZE;
+
+ memcpy(data->block, dma_buffer, desc->rxbytes);
break;
}
return 0;
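In the ismt_process_desc() fix above, the first DMA byte is the block length reported by the device, so the descriptor is rejected unless rxbytes equals that length plus one before anything is copied into the caller's buffer. A hedged stand-alone sketch of that validation; names are invented, and the extra I2C_SMBUS_BLOCK_MAX bound is an additional defensive check, not part of the patch:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define I2C_SMBUS_BLOCK_MAX 32	/* per the SMBus specification */

/* data[0] receives the block length, data[1..] the payload. */
static int copy_block_read(uint8_t *data, const uint8_t *dma_buffer,
			   unsigned int rxbytes)
{
	/* dma_buffer[0] is the device-reported length; rxbytes includes it. */
	if (rxbytes != (unsigned int)dma_buffer[0] + 1)
		return -EMSGSIZE;
	if (dma_buffer[0] > I2C_SMBUS_BLOCK_MAX)
		return -EPROTO;

	memcpy(data, dma_buffer, rxbytes);
	return 0;
}

int main(void)
{
	uint8_t dma[] = { 3, 0xaa, 0xbb, 0xcc };
	uint8_t out[I2C_SMBUS_BLOCK_MAX + 1] = { 0 };

	printf("good frame : %d\n", copy_block_read(out, dma, sizeof(dma)));
	printf("short frame: %d\n", copy_block_read(out, dma, 3));
	return 0;
}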
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index b4685bb9b5d7..adca51a99487 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
iounmap(pd->reg);
err_res:
- release_resource(pd->ioarea);
- kfree(pd->ioarea);
+ release_mem_region(pd->ioarea->start, size);
err:
kfree(pd);
@@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev)
i2c_del_adapter(&pd->adap);
iounmap(pd->reg);
- release_resource(pd->ioarea);
- kfree(pd->ioarea);
+ release_mem_region(pd->ioarea->start, resource_size(pd->ioarea));
kfree(pd);
return 0;
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 4842ec3a5451..a9126b3cda61 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+ struct i2c_client *client)
+{
+ if (!(client && matches))
+ return NULL;
+
+ return acpi_match_device(matches, &client->dev);
+}
+
static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
void *data, void **return_value)
{
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
-static int i2c_acpi_match_adapter(struct device *dev, void *data)
+static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
{
struct i2c_adapter *adapter = i2c_verify_adapter(dev);
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
return ACPI_HANDLE(dev) == (acpi_handle)data;
}
-static int i2c_acpi_match_device(struct device *dev, void *data)
+static int i2c_acpi_find_match_device(struct device *dev, void *data)
{
return ACPI_COMPANION(dev) == data;
}
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
struct device *dev;
dev = bus_find_device(&i2c_bus_type, NULL, handle,
- i2c_acpi_match_adapter);
+ i2c_acpi_find_match_adapter);
return dev ? i2c_verify_adapter(dev) : NULL;
}
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+ dev = bus_find_device(&i2c_bus_type, NULL, adev,
+ i2c_acpi_find_match_device);
return dev ? i2c_verify_client(dev) : NULL;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index c89dac7fd2e7..56e46581b84b 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -353,10 +353,11 @@ static int i2c_device_probe(struct device *dev)
}
/*
- * An I2C ID table is not mandatory, if and only if, a suitable Device
- * Tree match table entry is supplied for the probing device.
+ * An I2C ID table is not mandatory, if and only if, a suitable OF
+ * or ACPI ID table is supplied for the probing device.
*/
if (!driver->id_table &&
+ !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
!i2c_of_match_device(dev->driver->of_match_table, client))
return -ENODEV;
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 3b63f5e5b89c..3d3d9bf02101 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
int i2c_check_7bit_addr_validity_strict(unsigned short addr);
#ifdef CONFIG_ACPI
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+ struct i2c_client *client);
void i2c_acpi_register_devices(struct i2c_adapter *adap);
#else /* CONFIG_ACPI */
static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+static inline const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+ struct i2c_client *client)
+{
+ return NULL;
+}
#endif /* CONFIG_ACPI */
extern struct notifier_block i2c_acpi_notifier;
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 2c64d0e0740f..17121329bb79 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
different sets of pins at run-time.
This driver can also be built as a module. If so, the module will be
- called pinctrl-i2cmux.
+ called i2c-mux-pinctrl.
config I2C_MUX_REG
tristate "Register-based I2C multiplexer"
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 6b5d3be283c4..807299dd45eb 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
struct regmap *regmap;
int irq;
struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
- atomic_t active_intr;
struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
struct mutex mutex;
u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
goto out_fix_power_state;
}
- if (state)
- atomic_inc(&data->active_intr);
- else
- atomic_dec(&data->active_intr);
-
return 0;
out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
struct bmc150_accel_data *data = iio_priv(indio_dev);
mutex_lock(&data->mutex);
- if (atomic_read(&data->active_intr))
- bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
bmc150_accel_fifo_set_mode(data);
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 07d1489cd457..e44f62bf9caa 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.en_mask = 0x08,
},
},
+ .sim = {
+ .addr = 0x24,
+ .value = BIT(0),
+ },
.multi_read_bit = false,
.bootime = 2,
},
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(1),
+ },
.multi_read_bit = true,
.bootime = 2, /* guess */
},
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(7),
+ },
.multi_read_bit = false,
.bootime = 2, /* guess */
},
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr_ihl = 0x22,
.mask_ihl = 0x80,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(1),
+ },
.multi_read_bit = false,
.bootime = 2,
},
@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index e0ea411a0b2d..c02b23d675cb 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -22,6 +22,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
+#include <linux/iopoll.h>
#define ASPEED_RESOLUTION_BITS 10
#define ASPEED_CLOCKS_PER_SAMPLE 12
@@ -38,11 +39,17 @@
#define ASPEED_ENGINE_ENABLE BIT(0)
+#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)
+
+#define ASPEED_ADC_INIT_POLLING_TIME 500
+#define ASPEED_ADC_INIT_TIMEOUT 500000
+
struct aspeed_adc_model_data {
const char *model_name;
unsigned int min_sampling_rate; // Hz
unsigned int max_sampling_rate; // Hz
unsigned int vref_voltage; // mV
+ bool wait_init_sequence;
};
struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
goto scaler_error;
}
+ model_data = of_device_get_match_data(&pdev->dev);
+
+ if (model_data->wait_init_sequence) {
+ /* Enable engine in normal mode. */
+ writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+
+ /* Wait for initial sequence complete. */
+ ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
+ adc_engine_control_reg_val,
+ adc_engine_control_reg_val &
+ ASPEED_ADC_CTRL_INIT_RDY,
+ ASPEED_ADC_INIT_POLLING_TIME,
+ ASPEED_ADC_INIT_TIMEOUT);
+ if (ret)
+ goto scaler_error;
+ }
+
/* Start all channels in normal mode. */
ret = clk_prepare_enable(data->clk_scaler->clk);
if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
.vref_voltage = 1800, // mV
.min_sampling_rate = 1,
.max_sampling_rate = 1000000,
+ .wait_init_sequence = true,
};
static const struct of_device_id aspeed_adc_matches[] = {
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 64799ad7ebad..462a99c13e7a 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -28,6 +28,8 @@
#include <linux/iio/driver.h>
#define AXP288_ADC_EN_MASK 0xF1
+#define AXP288_ADC_TS_PIN_GPADC 0xF2
+#define AXP288_ADC_TS_PIN_ON 0xF3
enum axp288_adc_id {
AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
return IIO_VAL_INT;
}
+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+ unsigned long address)
+{
+ int ret;
+
+ /* channels other than GPADC do not need to switch TS pin */
+ if (address != AXP288_GP_ADC_H)
+ return 0;
+
+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+ if (ret)
+ return ret;
+
+ /* When switching to the GPADC pin give things some time to settle */
+ if (mode == AXP288_ADC_TS_PIN_GPADC)
+ usleep_range(6000, 10000);
+
+ return 0;
+}
+
static int axp288_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+ chan->address)) {
+ dev_err(&indio_dev->dev, "GPADC mode\n");
+ ret = -EINVAL;
+ break;
+ }
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+ chan->address))
+ dev_err(&indio_dev->dev, "TS pin restore\n");
break;
default:
ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
+static int axp288_adc_set_state(struct regmap *regmap)
+{
+ /* ADC should be always enabled for internal FG to function */
+ if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+ return -EIO;
+
+ return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+}
+
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
.driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
* Set ADC to enabled state at all time, including system suspend.
* otherwise internal fuel gauge functionality may be affected.
*/
- ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+ ret = axp288_adc_set_state(axp20x->regmap);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 232c0b80d658..c3f86138cb55 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -644,7 +644,7 @@ static int ina2xx_capture_thread(void *data)
{
struct iio_dev *indio_dev = data;
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
- unsigned int sampling_us = SAMPLING_PERIOD(chip);
+ int sampling_us = SAMPLING_PERIOD(chip);
int buffer_us;
/*
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index e09233b03c05..609676384f5e 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -64,7 +64,7 @@
#define STM32H7_CKMODE_MASK GENMASK(17, 16)
/* STM32 H7 maximum analog clock rate (from datasheet) */
-#define STM32H7_ADC_MAX_CLK_RATE 72000000
+#define STM32H7_ADC_MAX_CLK_RATE 36000000
/**
* stm32_adc_common_regs - stm32 common registers, compatible dependent data
@@ -148,14 +148,14 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
return -EINVAL;
}
- priv->common.rate = rate;
+ priv->common.rate = rate / stm32f4_pclk_div[i];
val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
val &= ~STM32F4_ADC_ADCPRE_MASK;
val |= i << STM32F4_ADC_ADCPRE_SHIFT;
writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
- rate / (stm32f4_pclk_div[i] * 1000));
+ priv->common.rate / 1000);
return 0;
}
@@ -250,7 +250,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
out:
/* rate used later by each ADC instance to control BOOST mode */
- priv->common.rate = rate;
+ priv->common.rate = rate / div;
/* Set common clock mode and prescaler */
val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR);
@@ -260,7 +260,7 @@ out:
writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR);
dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n",
- ckmode ? "bus" : "adc", div, rate / (div * 1000));
+ ckmode ? "bus" : "adc", div, priv->common.rate / 1000);
return 0;
}
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 81d4c39e414a..137f577d9432 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
err:
pm_runtime_put_autosuspend(indio_dev->dev.parent);
+ disable_irq(irq);
mutex_unlock(&info->mutex);
return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
complete(&info->completion);
out:
- disable_irq_nosync(info->temp_data_irq);
return IRQ_HANDLED;
}
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
complete(&info->completion);
out:
- disable_irq_nosync(info->fifo_data_irq);
return IRQ_HANDLED;
}
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 01fc76f7d660..c168e0db329a 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -77,7 +77,7 @@
#define VF610_ADC_ADSTS_MASK 0x300
#define VF610_ADC_ADLPC_EN 0x80
#define VF610_ADC_ADHSC_EN 0x400
-#define VF610_ADC_REFSEL_VALT 0x100
+#define VF610_ADC_REFSEL_VALT 0x800
#define VF610_ADC_REFSEL_VBG 0x1000
#define VF610_ADC_ADTRG_HARD 0x2000
#define VF610_ADC_AVGS_8 0x4000
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 16ade0a0327b..0e4b379ada45 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -111,8 +111,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
s32 poll_value = 0;
if (state) {
- if (!atomic_read(&st->user_requested_state))
- return 0;
if (sensor_hub_device_open(st->hsdev))
return -EIO;
@@ -161,6 +159,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
&report_val);
}
+ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
+ st->pdev->name, state_val, report_val);
+
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
sizeof(state_val), &state_val);
@@ -182,6 +183,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
pm_runtime_mark_last_busy(&st->pdev->dev);
+ pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
}
if (ret < 0) {
@@ -285,8 +287,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
/* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000);
- pm_runtime_use_autosuspend(&attrb->pdev->dev);
-
return ret;
error_unreg_trigger:
iio_trigger_unregister(trig);
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 79c8c7cd70d5..6e6a1ecc99dd 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -550,6 +550,31 @@ out:
}
EXPORT_SYMBOL(st_sensors_read_info_raw);
+static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
+ const struct st_sensor_settings *sensor_settings)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ struct device_node *np = sdata->dev->of_node;
+ struct st_sensors_platform_data *pdata;
+
+ pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
+ if (((np && of_property_read_bool(np, "spi-3wire")) ||
+ (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
+ int err;
+
+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
+ sensor_settings->sim.addr,
+ sensor_settings->sim.value);
+ if (err < 0) {
+ dev_err(&indio_dev->dev,
+ "failed to init interface mode\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int st_sensors_check_device_support(struct iio_dev *indio_dev,
int num_sensors_list,
const struct st_sensor_settings *sensor_settings)
@@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
return -ENODEV;
}
+ err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
+ if (err < 0)
+ return err;
+
if (sensor_settings[i].wai_addr) {
err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
sensor_settings[i].wai_addr, &wai);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 8cf84d3488b2..12898424d838 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
.gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(12500),
- .accel_max_scale = 5,
+ .accel_max_scale = 10,
},
[ADIS16485] = {
.channels = adis16485_channels,
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index e7d4ea75e007..7599693f7fe9 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
struct tsl2563_chip *chip = iio_priv(dev_info);
iio_push_event(dev_info,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 8e1b0861fbe4..c38563699984 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.drdy_irq = {
.addr = 0x62,
.mask_int1 = 0x01,
- .addr_ihl = 0x63,
- .mask_ihl = 0x04,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .addr_stat_drdy = 0x67,
},
.multi_read_bit = false,
.bootime = 2,
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index d82b788374b6..0d2ea3ee371b 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -282,6 +282,11 @@ static int bmp280_read_temp(struct bmp280_data *data,
}
adc_temp = be32_to_cpu(tmp) >> 12;
+ if (adc_temp == BMP280_TEMP_SKIPPED) {
+ /* reading was skipped */
+ dev_err(data->dev, "reading temperature skipped\n");
+ return -EIO;
+ }
comp_temp = bmp280_compensate_temp(data, adc_temp);
/*
@@ -317,6 +322,11 @@ static int bmp280_read_press(struct bmp280_data *data,
}
adc_press = be32_to_cpu(tmp) >> 12;
+ if (adc_press == BMP280_PRESS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(data->dev, "reading pressure skipped\n");
+ return -EIO;
+ }
comp_press = bmp280_compensate_press(data, adc_press);
*val = comp_press;
@@ -345,6 +355,11 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
}
adc_humidity = be16_to_cpu(tmp);
+ if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
+ /* reading was skipped */
+ dev_err(data->dev, "reading humidity skipped\n");
+ return -EIO;
+ }
comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
*val = comp_humidity;
@@ -597,14 +612,20 @@ static const struct bmp280_chip_info bmp280_chip_info = {
static int bme280_chip_config(struct bmp280_data *data)
{
- int ret = bmp280_chip_config(data);
+ int ret;
u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1);
+ /*
+ * Oversampling of humidity must be set before oversampling of
+ * temperature/pressure is set to become effective.
+ */
+ ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
+ BMP280_OSRS_HUMIDITY_MASK, osrs);
+
if (ret < 0)
return ret;
- return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
- BMP280_OSRS_HUMIDITY_MASK, osrs);
+ return bmp280_chip_config(data);
}
static const struct bmp280_chip_info bme280_chip_info = {
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 2c770e13be0e..61347438b779 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -96,6 +96,11 @@
#define BME280_CHIP_ID 0x60
#define BMP280_SOFT_RESET_VAL 0xB6
+/* BMP280 register skipped special values */
+#define BMP280_TEMP_SKIPPED 0x80000
+#define BMP280_PRESS_SKIPPED 0x80000
+#define BMP280_HUMIDITY_SKIPPED 0x8000
+
/* Regmap configurations */
extern const struct regmap_config bmp180_regmap_config;
extern const struct regmap_config bmp280_regmap_config;
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index aa61ec15c139..f1bce05ffa13 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = true,
+ .multi_read_bit = false,
.bootime = 2,
},
};
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index d22bc56dd9fc..25ad6abfee22 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -366,34 +366,32 @@ static int stm32_counter_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 dat;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- {
- u32 cnt;
-
- regmap_read(priv->regmap, TIM_CNT, &cnt);
- *val = cnt;
+ regmap_read(priv->regmap, TIM_CNT, &dat);
+ *val = dat;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ regmap_read(priv->regmap, TIM_CR1, &dat);
+ *val = (dat & TIM_CR1_CEN) ? 1 : 0;
return IIO_VAL_INT;
- }
- case IIO_CHAN_INFO_SCALE:
- {
- u32 smcr;
- regmap_read(priv->regmap, TIM_SMCR, &smcr);
- smcr &= TIM_SMCR_SMS;
+ case IIO_CHAN_INFO_SCALE:
+ regmap_read(priv->regmap, TIM_SMCR, &dat);
+ dat &= TIM_SMCR_SMS;
*val = 1;
*val2 = 0;
/* in quadrature case scale = 0.25 */
- if (smcr == 3)
+ if (dat == 3)
*val2 = 2;
return IIO_VAL_FRACTIONAL_LOG2;
}
- }
return -EINVAL;
}
@@ -403,15 +401,31 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 dat;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- regmap_write(priv->regmap, TIM_CNT, val);
+ return regmap_write(priv->regmap, TIM_CNT, val);
- return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
/* fixed scale */
return -EINVAL;
+
+ case IIO_CHAN_INFO_ENABLE:
+ if (val) {
+ regmap_read(priv->regmap, TIM_CR1, &dat);
+ if (!(dat & TIM_CR1_CEN))
+ clk_enable(priv->clk);
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+ TIM_CR1_CEN);
+ } else {
+ regmap_read(priv->regmap, TIM_CR1, &dat);
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+ 0);
+ if (dat & TIM_CR1_CEN)
+ clk_disable(priv->clk);
+ }
+ return 0;
}
return -EINVAL;
@@ -471,7 +485,7 @@ static int stm32_get_trigger_mode(struct iio_dev *indio_dev,
regmap_read(priv->regmap, TIM_SMCR, &smcr);
- return smcr == TIM_SMCR_SMS ? 0 : -EINVAL;
+ return (smcr & TIM_SMCR_SMS) == TIM_SMCR_SMS ? 0 : -EINVAL;
}
static const struct iio_enum stm32_trigger_mode_enum = {
@@ -507,9 +521,19 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
int sms = stm32_enable_mode2sms(mode);
+ u32 val;
if (sms < 0)
return sms;
+ /*
+ * Triggered mode sets CEN bit automatically by hardware. So, first
+ * enable counter clock, so it can use it. Keeps it in sync with CEN.
+ */
+ if (sms == 6) {
+ regmap_read(priv->regmap, TIM_CR1, &val);
+ if (!(val & TIM_CR1_CEN))
+ clk_enable(priv->clk);
+ }
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
@@ -571,11 +595,14 @@ static int stm32_get_quadrature_mode(struct iio_dev *indio_dev,
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 smcr;
+ int mode;
regmap_read(priv->regmap, TIM_SMCR, &smcr);
- smcr &= TIM_SMCR_SMS;
+ mode = (smcr & TIM_SMCR_SMS) - 1;
+ if ((mode < 0) || (mode >= ARRAY_SIZE(stm32_quadrature_modes)))
+ return -EINVAL;
- return smcr - 1;
+ return mode;
}
static const struct iio_enum stm32_quadrature_mode_enum = {
@@ -592,13 +619,20 @@ static const char *const stm32_count_direction_states[] = {
static int stm32_set_count_direction(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
- unsigned int mode)
+ unsigned int dir)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 val;
+ int mode;
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, mode);
+ /* In encoder mode, direction is RO (given by TI1/TI2 signals) */
+ regmap_read(priv->regmap, TIM_SMCR, &val);
+ mode = (val & TIM_SMCR_SMS) - 1;
+ if ((mode >= 0) && (mode < ARRAY_SIZE(stm32_quadrature_modes)))
+ return -EBUSY;
- return 0;
+ return regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR,
+ dir ? TIM_CR1_DIR : 0);
}
static int stm32_get_count_direction(struct iio_dev *indio_dev,
@@ -609,7 +643,7 @@ static int stm32_get_count_direction(struct iio_dev *indio_dev,
regmap_read(priv->regmap, TIM_CR1, &cr1);
- return (cr1 & TIM_CR1_DIR);
+ return ((cr1 & TIM_CR1_DIR) ? 1 : 0);
}
static const struct iio_enum stm32_count_direction_enum = {
@@ -672,7 +706,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
static const struct iio_chan_spec stm32_trigger_channel = {
.type = IIO_COUNT,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_SCALE),
.ext_info = stm32_trigger_count_info,
.indexed = 1
};
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 01236cef7bfb..437522ca97b4 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -61,6 +61,7 @@ struct addr_req {
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context);
unsigned long timeout;
+ struct delayed_work work;
int status;
u32 seq;
};
@@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
}
EXPORT_SYMBOL(rdma_translate_ip);
-static void set_timeout(unsigned long time)
+static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
{
unsigned long delay;
@@ -303,7 +304,7 @@ static void set_timeout(unsigned long time)
if ((long)delay < 0)
delay = 0;
- mod_delayed_work(addr_wq, &work, delay);
+ mod_delayed_work(addr_wq, delayed_work, delay);
}
static void queue_req(struct addr_req *req)
@@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req)
list_add(&req->list, &temp_req->list);
- if (req_list.next == &req->list)
- set_timeout(req->timeout);
+ set_timeout(&req->work, req->timeout);
mutex_unlock(&lock);
}
@@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in,
return ret;
}
+static void process_one_req(struct work_struct *_work)
+{
+ struct addr_req *req;
+ struct sockaddr *src_in, *dst_in;
+
+ mutex_lock(&lock);
+ req = container_of(_work, struct addr_req, work.work);
+
+ if (req->status == -ENODATA) {
+ src_in = (struct sockaddr *)&req->src_addr;
+ dst_in = (struct sockaddr *)&req->dst_addr;
+ req->status = addr_resolve(src_in, dst_in, req->addr,
+ true, req->seq);
+ if (req->status && time_after_eq(jiffies, req->timeout)) {
+ req->status = -ETIMEDOUT;
+ } else if (req->status == -ENODATA) {
+ /* requeue the work to retry later */
+ set_timeout(&req->work, req->timeout);
+ mutex_unlock(&lock);
+ return;
+ }
+ }
+ list_del(&req->list);
+ mutex_unlock(&lock);
+
+ req->callback(req->status, (struct sockaddr *)&req->src_addr,
+ req->addr, req->context);
+ put_client(req->client);
+ kfree(req);
+}
+
static void process_req(struct work_struct *work)
{
struct addr_req *req, *temp_req;
@@ -591,20 +622,23 @@ static void process_req(struct work_struct *work)
true, req->seq);
if (req->status && time_after_eq(jiffies, req->timeout))
req->status = -ETIMEDOUT;
- else if (req->status == -ENODATA)
+ else if (req->status == -ENODATA) {
+ set_timeout(&req->work, req->timeout);
continue;
+ }
}
list_move_tail(&req->list, &done_list);
}
- if (!list_empty(&req_list)) {
- req = list_entry(req_list.next, struct addr_req, list);
- set_timeout(req->timeout);
- }
mutex_unlock(&lock);
list_for_each_entry_safe(req, temp_req, &done_list, list) {
list_del(&req->list);
+ /* It is safe to cancel other work items from this work item
+ * because only one work item can run at a time on this
+ * single-threaded work queue.
+ */
+ cancel_delayed_work(&req->work);
req->callback(req->status, (struct sockaddr *) &req->src_addr,
req->addr, req->context);
put_client(req->client);
@@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
req->context = context;
req->client = client;
atomic_inc(&client->refcount);
+ INIT_DELAYED_WORK(&req->work, process_one_req);
req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
req->status = -ECANCELED;
req->timeout = jiffies;
list_move(&req->list, &req_list);
- set_timeout(req->timeout);
+ set_timeout(&req->work, req->timeout);
break;
}
}
@@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
if (event == NETEVENT_NEIGH_UPDATE) {
struct neighbour *neigh = ctx;
- if (neigh->nud_state & NUD_VALID) {
- set_timeout(jiffies);
- }
+ if (neigh->nud_state & NUD_VALID)
+ set_timeout(&work, jiffies);
}
return 0;
}
@@ -820,7 +854,7 @@ static struct notifier_block nb = {
int addr_init(void)
{
- addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
+ addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
if (!addr_wq)
return -ENOMEM;
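The addr.c rework above gives every request its own delayed_work on an ordered workqueue, so an individual entry can be re-armed or cancelled without touching a shared timer. A minimal kernel-module-style sketch of that pattern, with illustrative names and delays only:

// SPDX-License-Identifier: GPL-2.0
/* Sketch: one delayed_work per request on an ordered workqueue. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_req {
	struct delayed_work work;	/* per-request work item */
	int tries;
};

static struct workqueue_struct *my_wq;

static void my_req_fn(struct work_struct *_work)
{
	struct my_req *req = container_of(_work, struct my_req, work.work);

	if (++req->tries < 3) {
		/* Not done yet: re-arm only this request. */
		mod_delayed_work(my_wq, &req->work, msecs_to_jiffies(100));
		return;
	}
	pr_info("request finished after %d tries\n", req->tries);
	kfree(req);
}

static int __init my_init(void)
{
	struct my_req *req;

	/* Ordered: at most one work item runs at a time, in queueing order. */
	my_wq = alloc_ordered_workqueue("my_addr_wq", WQ_MEM_RECLAIM);
	if (!my_wq)
		return -ENOMEM;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req) {
		destroy_workqueue(my_wq);
		return -ENOMEM;
	}
	INIT_DELAYED_WORK(&req->work, my_req_fn);
	mod_delayed_work(my_wq, &req->work, msecs_to_jiffies(10));
	return 0;
}

static void __exit my_exit(void)
{
	/* Real code must also cancel and free any still-pending requests. */
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");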
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a5dfab6adf49..221468f77128 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -537,10 +537,11 @@ void ib_unregister_device(struct ib_device *device)
}
up_read(&lists_rwsem);
- mutex_unlock(&device_mutex);
-
ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
+
+ mutex_unlock(&device_mutex);
+
ib_cache_cleanup_one(device);
ib_security_destroy_port_pkey_list(device);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8c4ec564e495..55e8f5ed8b3c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -166,24 +166,6 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
return 0;
}
-static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
-
- ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
- address + PAGE_SIZE,
- invalidate_page_trampoline, NULL);
- up_read(&context->umem_rwsem);
- ib_ucontext_notifier_end_account(context);
-}
-
static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
u64 end, void *cookie)
{
@@ -237,7 +219,6 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
static const struct mmu_notifier_ops ib_umem_notifiers = {
.release = ib_umem_notifier_release,
- .invalidate_page = ib_umem_notifier_invalidate_page,
.invalidate_range_start = ib_umem_notifier_invalidate_range_start,
.invalidate_range_end = ib_umem_notifier_invalidate_range_end,
};
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 2c98533a0203..739bd69ef1d4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
cq->uobject = &obj->uobject;
cq->comp_handler = ib_uverbs_comp_handler;
cq->event_handler = ib_uverbs_cq_event_handler;
- cq->cq_context = &ev_file->ev_queue;
+ cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
atomic_set(&cq->usecnt, 0);
obj->uobject.object = cq;
@@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
int out_len)
{
struct ib_uverbs_resize_cq cmd;
- struct ib_uverbs_resize_cq_resp resp;
+ struct ib_uverbs_resize_cq_resp resp = {};
struct ib_udata udata;
struct ib_cq *cq;
int ret = -EINVAL;
@@ -1522,6 +1522,7 @@ static int create_qp(struct ib_uverbs_file *file,
qp->qp_type = attr.qp_type;
atomic_set(&qp->usecnt, 0);
atomic_inc(&pd->usecnt);
+ qp->port = 0;
if (attr.send_cq)
atomic_inc(&attr.send_cq->usecnt);
if (attr.recv_cq)
@@ -1962,8 +1963,9 @@ static int modify_qp(struct ib_uverbs_file *file,
attr->alt_timeout = cmd->base.alt_timeout;
attr->rate_limit = cmd->rate_limit;
- attr->ah_attr.type = rdma_ah_find_type(qp->device,
- cmd->base.dest.port_num);
+ if (cmd->base.attr_mask & IB_QP_AV)
+ attr->ah_attr.type = rdma_ah_find_type(qp->device,
+ cmd->base.dest.port_num);
if (cmd->base.dest.is_global) {
rdma_ah_set_grh(&attr->ah_attr, NULL,
cmd->base.dest.flow_label,
@@ -1981,8 +1983,9 @@ static int modify_qp(struct ib_uverbs_file *file,
rdma_ah_set_port_num(&attr->ah_attr,
cmd->base.dest.port_num);
- attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
- cmd->base.dest.port_num);
+ if (cmd->base.attr_mask & IB_QP_ALT_PATH)
+ attr->alt_ah_attr.type =
+ rdma_ah_find_type(qp->device, cmd->base.dest.port_num);
if (cmd->base.alt_dest.is_global) {
rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
cmd->base.alt_dest.flow_label,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3d2609608f58..5e530d2bee44 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
+ kobject_put(&file->device->kobj);
kfree(file);
}
@@ -917,7 +918,6 @@ err:
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
- struct ib_uverbs_device *dev = file->device;
mutex_lock(&file->cleanup_mutex);
if (file->ucontext) {
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
ib_uverbs_release_async_event_file);
kref_put(&file->ref, ib_uverbs_release_file);
- kobject_put(&dev->kobj);
return 0;
}
@@ -1154,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
kref_get(&file->ref);
mutex_unlock(&uverbs_dev->lists_mutex);
- ib_uverbs_event_handler(&file->event_handler, &event);
mutex_lock(&file->cleanup_mutex);
ucontext = file->ucontext;
@@ -1171,6 +1169,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
* for example due to freeing the resources
* (e.g mmput).
*/
+ ib_uverbs_event_handler(&file->event_handler, &event);
ib_dev->disassociate_ucontext(ucontext);
mutex_lock(&file->cleanup_mutex);
ib_uverbs_cleanup_ucontext(file, ucontext, true);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index fb98ed67d5bc..b456e3ca1876 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -838,6 +838,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->mr_lock);
INIT_LIST_HEAD(&qp->rdma_mrs);
INIT_LIST_HEAD(&qp->sig_mrs);
+ qp->port = 0;
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
return ib_create_xrc_qp(qp, qp_init_attr);
@@ -895,7 +896,6 @@ static const struct {
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },
- [IB_QPS_ERR] = { .valid = 1 },
[IB_QPS_INIT] = {
.valid = 1,
.req_param = {
@@ -1298,7 +1298,11 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
if (ret)
return ret;
}
- return ib_security_modify_qp(qp, attr, attr_mask, udata);
+ ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+ if (!ret && (attr_mask & IB_QP_PORT))
+ qp->port = attr->port_num;
+
+ return ret;
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 5332f06b99ba..c2fba76becd4 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -661,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
rhp = php->rhp;
if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+ max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
use_dsgl))
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index ccbf52c8ff6f..e4b56a0dd6d0 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -67,8 +67,6 @@ struct mmu_rb_handler {
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
-static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
- unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
struct mm_struct *,
unsigned long, unsigned long);
@@ -82,7 +80,6 @@ static void do_remove(struct mmu_rb_handler *handler,
static void handle_remove(struct work_struct *work);
static const struct mmu_notifier_ops mn_opts = {
- .invalidate_page = mmu_notifier_page,
.invalidate_range_start = mmu_notifier_range_start,
};
@@ -285,12 +282,6 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
handler->ops->remove(handler->ops_arg, node);
}
-static inline void mmu_notifier_page(struct mmu_notifier *mn,
- struct mm_struct *mm, unsigned long addr)
-{
- mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
-}
-
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index f78a733a63ec..d545302b8ef8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
} else {
u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
- if (!dmac)
+ if (!dmac) {
+ kfree(ah);
return ERR_PTR(-EINVAL);
+ }
memcpy(ah->av.mac, dmac, ETH_ALEN);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 23fad6d96944..2540b65e242c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
continue;
free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
- if (IS_ERR(free_mr->mr_free_qp[i])) {
+ if (!free_mr->mr_free_qp[i]) {
dev_err(dev, "Create loop qp failed!\n");
goto create_lp_qp_failed;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 9ec1ae9a82c9..a49ff2eb6fb3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
u64 base = 0;
u32 i, j;
u32 k = 0;
- u32 low;
/* copy base values in obj_info */
- for (i = I40IW_HMC_IW_QP, j = 0;
- i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ if ((i == I40IW_HMC_IW_SRQ) ||
+ (i == I40IW_HMC_IW_FSIMC) ||
+ (i == I40IW_HMC_IW_FSIAV)) {
+ info[i].base = 0;
+ info[i].cnt = 0;
+ continue;
+ }
get_64bit_val(buf, j, &temp);
info[i].base = RS_64_1(temp, 32) * 512;
if (info[i].base > base) {
base = info[i].base;
k = i;
}
- low = (u32)(temp);
- if (low)
- info[i].cnt = low;
+ if (i == I40IW_HMC_IW_APBVT_ENTRY) {
+ info[i].cnt = 1;
+ continue;
+ }
+ if (i == I40IW_HMC_IW_QP)
+ info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+ else if (i == I40IW_HMC_IW_CQ)
+ info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+ else
+ info[i].cnt = (u32)(temp);
}
size = info[k].cnt * info[k].size + info[k].base;
if (size & 0x1FFFFF)
@@ -155,6 +167,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
}
/**
+ * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @obj_info: ptr to i40iw_hmc_obj_info struct
+ * @rsrc_idx: resource index into obj_info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 i40iw_sc_decode_fpm_query(u64 *buf,
+ u32 buf_idx,
+ struct i40iw_hmc_obj_info *obj_info,
+ u32 rsrc_idx)
+{
+ u64 temp;
+ u32 size;
+
+ get_64bit_val(buf, buf_idx, &temp);
+ obj_info[rsrc_idx].max_cnt = (u32)temp;
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[rsrc_idx].size = LS_64_1(1, size);
+
+ return temp;
+}
+
+/**
* i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
* @buf: ptr to fpm query buffer
* @info: ptr to i40iw_hmc_obj_info struct
@@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
struct i40iw_hmc_info *hmc_info,
struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
- u64 temp;
struct i40iw_hmc_obj_info *obj_info;
- u32 i, j, size;
+ u64 temp;
+ u32 size;
u16 max_pe_sds;
obj_info = hmc_info->hmc_obj;
@@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
hmc_fpm_misc->max_sds = max_pe_sds;
hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
- for (i = I40IW_HMC_IW_QP, j = 8;
- i <= I40IW_HMC_IW_ARP; i++, j += 8) {
- get_64bit_val(buf, j, &temp);
- if (i == I40IW_HMC_IW_QP)
- obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
- else if (i == I40IW_HMC_IW_CQ)
- obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
- else
- obj_info[i].max_cnt = (u32)temp;
+ get_64bit_val(buf, 8, &temp);
+ obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
- size = (u32)RS_64_1(temp, 32);
- obj_info[i].size = ((u64)1 << size);
- }
- for (i = I40IW_HMC_IW_MR, j = 48;
- i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
- get_64bit_val(buf, j, &temp);
- obj_info[i].max_cnt = (u32)temp;
- size = (u32)RS_64_1(temp, 32);
- obj_info[i].size = LS_64_1(1, size);
- }
+ get_64bit_val(buf, 16, &temp);
+ obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
+
+ i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
+ i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
+
+ obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+ i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
+ i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
get_64bit_val(buf, 64, &temp);
+ obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
+ obj_info[I40IW_HMC_IW_XFFL].size = 4;
hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
if (!hmc_fpm_misc->xf_block_size)
return I40IW_ERR_INVALID_SIZE;
+
+ i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
+
get_64bit_val(buf, 80, &temp);
+ obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
+ obj_info[I40IW_HMC_IW_Q1FL].size = 4;
hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
if (!hmc_fpm_misc->q1_block_size)
return I40IW_ERR_INVALID_SIZE;
+
+ i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
+
+ get_64bit_val(buf, 112, &temp);
+ obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
+ obj_info[I40IW_HMC_IW_PBLE].size = 8;
+
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+ hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+ hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+
return 0;
}
@@ -3392,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_
hmc_info->sd_table.sd_entry = virt_mem.va;
}
- /* fill size of objects which are fixed */
- hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
- hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
- hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
- hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
-
return ret_code;
}
@@ -4840,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
{
u8 fcn_id = vsi->fcn_id;
- if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+ if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
vsi->dev->fcn_id_array[fcn_id] = false;
i40iw_hw_stats_stop_timer(vsi);
}
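For clarity, the new i40iw_sc_decode_fpm_query() helper above splits each 64-bit FPM query word into two fields: the low 32 bits carry the maximum object count and the high 32 bits carry the log2 of the object size. A minimal standalone sketch of that decode, assuming RS_64_1()/LS_64_1() are plain 64-bit shift macros as defined in i40iw_d.h:

#include <stdint.h>

/* Hypothetical standalone version of the decode performed by
 * i40iw_sc_decode_fpm_query(); not part of the patch itself.
 */
void fpm_decode_word(uint64_t word, uint32_t *max_cnt, uint64_t *obj_size)
{
	*max_cnt = (uint32_t)word;		/* low 32 bits: max object count */
	*obj_size = 1ULL << (word >> 32);	/* high 32 bits: log2(object size) */
}

/* Example: word 0x0000000C00010000 decodes to max_cnt = 65536, obj_size = 4096 bytes. */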
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index a39ac12b6a7e..2ebaadbed379 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1507,8 +1507,8 @@ enum {
I40IW_CQ0_ALIGNMENT_MASK = (256 - 1),
I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1),
I40IW_SHADOWAREA_MASK = (128 - 1),
- I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0,
- I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1),
+ I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1)
};
enum i40iw_alignment {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 71050c5d29a0..7f5583d83622 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -685,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
- I40IW_CQ0_ALIGNMENT_MASK);
+ I40IW_CQ0_ALIGNMENT);
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
index 91c421762f06..f7013f11d808 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -62,7 +62,7 @@ enum i40iw_status_code {
I40IW_ERR_INVALID_ALIGNMENT = -23,
I40IW_ERR_FLUSHED_QUEUE = -24,
I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
- I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+ I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
I40IW_ERR_TIMEOUT = -27,
I40IW_ERR_OPCODE_MISMATCH = -28,
I40IW_ERR_CQP_COMPL_ERROR = -29,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index b0d3a0e8a9b5..1060725d18bc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
op_info = &info->op.inline_rdma_write;
if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
if (ret_code)
@@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
op_info = &info->op.inline_send;
if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
if (ret_code)
@@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
get_64bit_val(cqe, 0, &qword0);
get_64bit_val(cqe, 16, &qword2);
- info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+ info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);
info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
@@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
u8 *wqe_size)
{
if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
if (data_size <= 16)
*wqe_size = I40IW_QP_WQE_MIN_SIZE;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a7f2e60085c4..f7fcde1ff0aa 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1085,6 +1085,12 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
IB_LINK_LAYER_INFINIBAND);
+ /* CM layer calls ib_modify_port() regardless of the link layer. For
+ * Ethernet ports, the qkey violation counter and port capabilities are meaningless.
+ */
+ if (!is_ib)
+ return 0;
+
if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index ae0746754008..3d701c7a4c91 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
if (qp->ibqp.qp_type != IB_QPT_RC) {
av = *wqe;
- if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
+ if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av);
else
*wqe += sizeof(struct mlx5_base_av);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0889ff367c86..f58f8f5f3ebe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1238,6 +1238,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
goto err_destroy_tis;
sq->base.container_mibqp = qp;
+ sq->base.mqp.event = mlx5_ib_qp_event;
}
if (qp->rq.wqe_cnt) {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 69bda611d313..90aa326fd7c0 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
struct pvrdma_dev *dev = to_vdev(ibcq->device);
struct pvrdma_cq *cq = to_vcq(ibcq);
u32 val = cq->cq_handle;
+ unsigned long flags;
+ int has_data = 0;
val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
pvrdma_write_uar_cq(dev, val);
- return 0;
+ if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+ unsigned int head;
+
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (unlikely(has_data == PVRDMA_INVALID_IDX))
+ dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return has_data;
}
/**
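The new return value matters to consumers that arm the CQ with IB_CQ_REPORT_MISSED_EVENTS: a positive return now indicates completions arrived between the last poll and the re-arm. A hedged sketch of the usual consumer pattern (poll_more() is a hypothetical placeholder for the caller's own poll loop):

/* Re-arm the CQ; if completions slipped in before the arm took effect,
 * ib_req_notify_cq() returns > 0 and the caller should poll once more
 * instead of waiting for the next completion event.
 */
if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0)
	poll_more(cq);	/* hypothetical helper: drain the ring again */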
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ff50a7bd66d8..7ac25059c40f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,7 @@ struct ipoib_dev_priv {
unsigned long flags;
struct rw_semaphore vlan_rwsem;
+ struct mutex mcast_mutex;
struct rb_root path_tree;
struct list_head path_list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index f87d104837dc..d69410c2ed97 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
case IB_CM_REQ_RECEIVED:
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
- p = cm_id->context;
ib_send_cm_drep(cm_id, NULL, 0);
/* Fall through */
case IB_CM_REJ_RECEIVED:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 7871379342f4..184a22f48027 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
IPOIB_NETDEV_STAT(tx_bytes),
IPOIB_NETDEV_STAT(tx_errors),
IPOIB_NETDEV_STAT(rx_dropped),
- IPOIB_NETDEV_STAT(tx_dropped)
+ IPOIB_NETDEV_STAT(tx_dropped),
+ IPOIB_NETDEV_STAT(multicast),
};
#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 57a9655e844d..2e075377242e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
skb->dev = dev;
if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
return pending;
}
+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+ struct ib_qp *qp,
+ enum ib_qp_state new_state)
+{
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+ return;
+ }
+ /* print according to the new state and the previous state */
+ if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+ ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+ else
+ ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+ new_state, qp_attr.qp_state);
+}
+
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
*/
qp_attr.qp_state = IB_QPS_ERR;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
- ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
+ check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
/* Wait for all sends and receives to complete */
begin = jiffies;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 4ce315c92b48..6c77df34869d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
int i, wait_flushed = 0;
init_completion(&priv->ntbl.flushed);
+ set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
spin_lock_irqsave(&priv->lock, flags);
@@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
init_completion(&priv->ntbl.deleted);
- set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
/* Stop GC if called at init fail need to cancel work */
stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_tx_timeout = ipoib_timeout,
.ndo_set_rx_mode = ipoib_set_mcast_list,
.ndo_get_iflink = ipoib_get_iflink,
+ .ndo_get_stats64 = ipoib_get_stats,
};
void ipoib_setup_common(struct net_device *dev)
@@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev)
priv->dev = dev;
spin_lock_init(&priv->lock);
init_rwsem(&priv->vlan_rwsem);
+ mutex_init(&priv->mcast_mutex);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
@@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format,
priv->dev->dev_id = port - 1;
result = ib_query_port(hca, port, &attr);
- if (!result)
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
- else {
+ if (result) {
printk(KERN_WARNING "%s: ib_query_port %d failed\n",
hca->name, port);
goto device_init_failed;
}
+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
@@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format,
printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
hca->name, port, result);
goto device_init_failed;
- } else
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ }
+
+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+ sizeof(union ib_gid));
set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
result = ipoib_dev_init(priv->dev, hca, port);
- if (result < 0) {
+ if (result) {
printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
hca->name, port, result);
goto device_init_failed;
@@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void)
ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+ ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 057f58e6afca..93e149efc1f5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
int ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- unsigned long flags;
ipoib_dbg_mcast(priv, "stopping multicast thread\n");
- spin_lock_irqsave(&priv->lock, flags);
- cancel_delayed_work(&priv->mcast_task);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- flush_workqueue(priv->wq);
+ cancel_delayed_work_sync(&priv->mcast_task);
return 0;
}
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
{
struct ipoib_mcast *mcast, *tmcast;
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
struct ipoib_mcast *mcast, *tmcast;
unsigned long flags;
+ mutex_lock(&priv->mcast_mutex);
ipoib_dbg_mcast(priv, "flushing multicast list\n");
spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
-
ipoib_mcast_remove_list(&remove_list);
+ mutex_unlock(&priv->mcast_mutex);
}
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
-
ipoib_mcast_remove_list(&remove_list);
/*
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 298a6ba51411..ca0e19ae7a90 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -476,10 +476,21 @@ static const u8 xboxone_hori_init[] = {
};
/*
- * A rumble packet is required for some PowerA pads to start
+ * A specific rumble packet is required for some PowerA pads to start
* sending input reports. One of those pads is (0x24c6:0x543a).
*/
-static const u8 xboxone_zerorumble_init[] = {
+static const u8 xboxone_rumblebegin_init[] = {
+ 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
+ 0x1D, 0x1D, 0xFF, 0x00, 0x00
+};
+
+/*
+ * A rumble packet with zero FF intensity will immediately
+ * terminate the rumbling required to init PowerA pads.
+ * This should happen fast enough that the motors don't
+ * spin up to enough speed to actually vibrate the gamepad.
+ */
+static const u8 xboxone_rumbleend_init[] = {
0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00
};
@@ -494,9 +505,12 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
- XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_zerorumble_init),
- XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_zerorumble_init),
- XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_zerorumble_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init),
};
struct xpad_output_packet {
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index f600f3a7a3c6..23520df7650f 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -331,7 +331,7 @@ static int soc_button_probe(struct platform_device *pdev)
error = gpiod_count(dev, NULL);
if (error < 0) {
dev_dbg(dev, "no GPIO attached, ignoring...\n");
- return error;
+ return -ENODEV;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 262d1057c1da..850b00e3ad8e 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_TWO:
if (priv->flags & ALPS_BUTTONPAD) {
- f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ } else {
+ f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+ }
f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
- f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
} else {
- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ } else {
+ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ }
f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
}
f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
@@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_MULTI:
if (priv->flags & ALPS_BUTTONPAD) {
- f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ } else {
+ f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ }
+
f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
- f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX_BL;
no_data_y = SS4_MFPACKET_NO_AY_BL;
} else {
- f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ } else {
+ f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+ }
f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
- f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX;
no_data_y = SS4_MFPACKET_NO_AY;
@@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
memset(otp, 0, sizeof(otp));
- if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
- alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
+ if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
+ alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
return -1;
alps_update_device_area_ss4_v2(otp, priv);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index ed2d6879fa52..c80a7c76cb76 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
((_b[1 + _i * 3] << 5) & 0x1F00) \
)
+#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
+ ((_b[1 + (_i) * 3] << 4) & 0x0F80) \
+ )
+
#define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
((_b[2 + (_i) * 3] << 4) & 0x0E00) \
@@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
((_b[0 + (_i) * 3] >> 3) & 0x0010) \
)
+#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
+ ((_b[0 + (_i) * 3] >> 4) & 0x0008) \
+ )
+
#define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
((_b[0 + (_i) * 3] >> 3) & 0x0008) \
)
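As a worked example of the new SS4 PLUS decode (hypothetical packet bytes, not taken from a real trace):

/* For finger 0 with p[0] = 0x05 and p[1] = 0x3A:
 *
 *   SS4_PLUS_STD_MF_X_V2(p, 0) = ((0x05 << 4) & 0x0070) | ((0x3A << 4) & 0x0F80)
 *                              =  0x0050               |  0x0380
 *                              =  0x03D0  (X = 976)
 *
 * SS4_PLUS_BTL_MF_X_V2() additionally folds bit 7 of p[0] into bit 3 of X.
 */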
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 3b616cb7c67f..cfbc8ba4c96c 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1247,7 +1247,11 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
+ { "ELAN0602", 0 },
{ "ELAN0605", 0 },
+ { "ELAN0608", 0 },
+ { "ELAN0609", 0 },
+ { "ELAN060B", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 16c30460ef04..5af0b7d200bc 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -535,16 +535,17 @@ static void synaptics_apply_quirks(struct psmouse *psmouse,
}
}
+static bool synaptics_has_agm(struct synaptics_data *priv)
+{
+ return (SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
+ SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c));
+}
+
static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
{
static u8 param = 0xc8;
- struct synaptics_data *priv = psmouse->private;
int error;
- if (!(SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)))
- return 0;
-
error = psmouse_sliced_command(psmouse, SYN_QUE_MODEL);
if (error)
return error;
@@ -553,9 +554,6 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
if (error)
return error;
- /* Advanced gesture mode also sends multi finger data */
- priv->info.capabilities |= BIT(1);
-
return 0;
}
@@ -578,7 +576,7 @@ static int synaptics_set_mode(struct psmouse *psmouse)
if (error)
return error;
- if (priv->absolute_mode) {
+ if (priv->absolute_mode && synaptics_has_agm(priv)) {
error = synaptics_set_advanced_gesture_mode(psmouse);
if (error) {
psmouse_err(psmouse,
@@ -766,9 +764,7 @@ static int synaptics_parse_hw_state(const u8 buf[],
((buf[0] & 0x04) >> 1) |
((buf[3] & 0x04) >> 2));
- if ((SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)) &&
- hw->w == 2) {
+ if (synaptics_has_agm(priv) && hw->w == 2) {
synaptics_parse_agm(buf, priv, hw);
return 1;
}
@@ -1033,6 +1029,15 @@ static void synaptics_image_sensor_process(struct psmouse *psmouse,
synaptics_report_mt_data(psmouse, sgm, num_fingers);
}
+static bool synaptics_has_multifinger(struct synaptics_data *priv)
+{
+ if (SYN_CAP_MULTIFINGER(priv->info.capabilities))
+ return true;
+
+ /* Advanced gesture mode also sends multi finger data */
+ return synaptics_has_agm(priv);
+}
+
/*
* called for each full received packet from the touchpad
*/
@@ -1079,7 +1084,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
if (SYN_CAP_EXTENDED(info->capabilities)) {
switch (hw.w) {
case 0 ... 1:
- if (SYN_CAP_MULTIFINGER(info->capabilities))
+ if (synaptics_has_multifinger(priv))
num_fingers = hw.w + 2;
break;
case 2:
@@ -1123,7 +1128,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
input_report_abs(dev, ABS_TOOL_WIDTH, finger_width);
input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1);
- if (SYN_CAP_MULTIFINGER(info->capabilities)) {
+ if (synaptics_has_multifinger(priv)) {
input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2);
input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3);
}
@@ -1283,7 +1288,7 @@ static void set_input_params(struct psmouse *psmouse,
__set_bit(BTN_TOUCH, dev->keybit);
__set_bit(BTN_TOOL_FINGER, dev->keybit);
- if (SYN_CAP_MULTIFINGER(info->capabilities)) {
+ if (synaptics_has_multifinger(priv)) {
__set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
}
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 922ea02edcc3..0871010f18d5 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
return -1;
- if (param[0] != TP_MAGIC_IDENT)
+ /* Check the magic bits; firmware IDs 0x01, 0x02 and 0x03 are all valid */
+ if (!(param[0] & TP_MAGIC_IDENT))
return -1;
if (firmware_id)
@@ -380,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
return 0;
if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data\n");
- button_info = 0;
+ psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
}
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 5617ed3a7d7a..88055755f82e 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,8 +21,9 @@
#define TP_COMMAND 0xE2 /* Commands start with this */
#define TP_READ_ID 0xE1 /* Sent for device identification */
-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */
+#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
/* by the firmware ID */
+ /* Firmware ID includes 0x1, 0x2, 0x3 */
/*
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 294a409e283b..d6b873b57054 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -574,7 +574,9 @@ struct amd_iommu {
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
- return container_of(dev, struct amd_iommu, iommu.dev);
+ struct iommu_device *iommu = dev_to_iommu_device(dev);
+
+ return container_of(iommu, struct amd_iommu, iommu);
}
#define ACPIHID_UID_LEN 256
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 6629c472eafd..dccf5b76eff2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
return 0;
}
-static void mn_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- __mn_flush_page(mn, address);
-}
-
static void mn_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
@@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops iommu_mn = {
.release = mn_release,
.clear_flush_young = mn_clear_flush_young,
- .invalidate_page = mn_invalidate_page,
.invalidate_range = mn_invalidate_range,
};
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b97188acc4f1..2d80fa8a0634 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1519,6 +1519,13 @@ static int arm_smmu_add_device(struct device *dev)
if (using_legacy_binding) {
ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+ /*
+ * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
+ * will allocate/initialise a new one. Thus we need to update fwspec for
+ * later use.
+ */
+ fwspec = dev->iommu_fwspec;
if (ret)
goto out_free;
} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 687f18f65cea..3e8636f1220e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4736,7 +4736,9 @@ static void intel_disable_iommus(void)
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
- return container_of(dev, struct intel_iommu, iommu.dev);
+ struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
+
+ return container_of(iommu_dev, struct intel_iommu, iommu);
}
static ssize_t intel_iommu_show_version(struct device *dev,
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f167c0d84ebf..f620dccec8ee 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -223,14 +223,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
intel_flush_svm_range(svm, address, 1, 1, 0);
}
-static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long address)
-{
- struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
- intel_flush_svm_range(svm, address, 1, 1, 0);
-}
-
/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -285,7 +277,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops intel_mmuops = {
.release = intel_mm_release,
.change_pte = intel_change_pte,
- .invalidate_page = intel_invalidate_page,
.invalidate_range = intel_invalidate_range,
};
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index c58351ed61c1..36d1a7ce7fc4 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
va_list vargs;
int ret;
- device_initialize(&iommu->dev);
+ iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
+ if (!iommu->dev)
+ return -ENOMEM;
- iommu->dev.class = &iommu_class;
- iommu->dev.parent = parent;
- iommu->dev.groups = groups;
+ device_initialize(iommu->dev);
+
+ iommu->dev->class = &iommu_class;
+ iommu->dev->parent = parent;
+ iommu->dev->groups = groups;
va_start(vargs, fmt);
- ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
+ ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
va_end(vargs);
if (ret)
goto error;
- ret = device_add(&iommu->dev);
+ ret = device_add(iommu->dev);
if (ret)
goto error;
+ dev_set_drvdata(iommu->dev, iommu);
+
return 0;
error:
- put_device(&iommu->dev);
+ put_device(iommu->dev);
return ret;
}
void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
- device_unregister(&iommu->dev);
+ dev_set_drvdata(iommu->dev, NULL);
+ device_unregister(iommu->dev);
+ iommu->dev = NULL;
}
/*
* IOMMU drivers can indicate a device is managed by a given IOMMU using
@@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
if (!iommu || IS_ERR(iommu))
return -ENODEV;
- ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
+ ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
&link->kobj, dev_name(link));
if (ret)
return ret;
- ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
+ ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu");
if (ret)
- sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
+ sysfs_remove_link_from_group(&iommu->dev->kobj, "devices",
dev_name(link));
return ret;
@@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
return;
sysfs_remove_link(&link->kobj, "iommu");
- sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
+ sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
}
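The dev_set_drvdata() call added above is what the reworked dev_to_amd_iommu()/dev_to_intel_iommu() helpers rely on; a hedged sketch of the assumed lookup path (presumably built on dev_get_drvdata(), since iommu_device_sysfs_add() now stores the iommu_device as drvdata):

/* Assumed shape of the helper the drivers now use; the sysfs device no
 * longer embeds the iommu_device, so the back-pointer comes from the
 * drvdata set in iommu_device_sysfs_add() above.
 */
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return dev_get_drvdata(dev);
}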
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 28b26c80f4cf..072bd227b6c6 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -137,14 +137,14 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
#define AT91_RTC_IMR 0x28
#define AT91_RTC_IRQ_MASK 0x1f
-void __init aic_common_rtc_irq_fixup(struct device_node *root)
+void __init aic_common_rtc_irq_fixup(void)
{
struct device_node *np;
void __iomem *regs;
- np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
+ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
if (!np)
- np = of_find_compatible_node(root, NULL,
+ np = of_find_compatible_node(NULL, NULL,
"atmel,at91sam9x5-rtc");
if (!np)
@@ -165,7 +165,7 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
#define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */
#define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */
-void __init aic_common_rtt_irq_fixup(struct device_node *root)
+void __init aic_common_rtt_irq_fixup(void)
{
struct device_node *np;
void __iomem *regs;
@@ -196,11 +196,10 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches)
return;
match = of_match_node(matches, root);
- of_node_put(root);
if (match) {
- void (*fixup)(struct device_node *) = match->data;
- fixup(root);
+ void (*fixup)(void) = match->data;
+ fixup();
}
of_node_put(root);
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
index af60376d50de..242e62c1851e 100644
--- a/drivers/irqchip/irq-atmel-aic-common.h
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -33,8 +33,8 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
const char *name, int nirqs,
const struct of_device_id *matches);
-void __init aic_common_rtc_irq_fixup(struct device_node *root);
+void __init aic_common_rtc_irq_fixup(void);
-void __init aic_common_rtt_irq_fixup(struct device_node *root);
+void __init aic_common_rtt_irq_fixup(void);
#endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 37f952dd9fc9..bb1ad451392f 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -209,20 +209,20 @@ static const struct irq_domain_ops aic_irq_ops = {
.xlate = aic_irq_domain_xlate,
};
-static void __init at91rm9200_aic_irq_fixup(struct device_node *root)
+static void __init at91rm9200_aic_irq_fixup(void)
{
- aic_common_rtc_irq_fixup(root);
+ aic_common_rtc_irq_fixup();
}
-static void __init at91sam9260_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9260_aic_irq_fixup(void)
{
- aic_common_rtt_irq_fixup(root);
+ aic_common_rtt_irq_fixup();
}
-static void __init at91sam9g45_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9g45_aic_irq_fixup(void)
{
- aic_common_rtc_irq_fixup(root);
- aic_common_rtt_irq_fixup(root);
+ aic_common_rtc_irq_fixup();
+ aic_common_rtt_irq_fixup();
}
static const struct of_device_id aic_irq_fixups[] __initconst = {
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index c04ee9a23d09..6acad2ea0fb3 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -305,9 +305,9 @@ static const struct irq_domain_ops aic5_irq_ops = {
.xlate = aic5_irq_domain_xlate,
};
-static void __init sama5d3_aic_irq_fixup(struct device_node *root)
+static void __init sama5d3_aic_irq_fixup(void)
{
- aic_common_rtc_irq_fixup(root);
+ aic_common_rtc_irq_fixup();
}
static const struct of_device_id aic5_irq_fixups[] __initconst = {
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index bddf169c4b37..b009b916a292 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -189,6 +189,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
+ ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
if (data->can_wake) {
/* This IRQ chip can wake the system, set all child interrupts
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 249240d9a425..833a90fe33ae 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -43,6 +43,7 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
*dev_id = args.args[0];
break;
}
+ index++;
} while (!ret);
return ret;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 68932873eebc..284738add89b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1835,7 +1835,7 @@ static int __init its_of_probe(struct device_node *node)
#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
-#if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531)
+#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
/* numa node id */
u32 numa_node;
@@ -1843,7 +1843,7 @@ struct its_srat_map {
u32 its_id;
};
-static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata;
+static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;
static int __init acpi_get_its_numa_node(u32 its_id)
@@ -1857,6 +1857,12 @@ static int __init acpi_get_its_numa_node(u32 its_id)
return NUMA_NO_NODE;
}
+static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ return 0;
+}
+
static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
const unsigned long end)
{
@@ -1873,12 +1879,6 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
return -EINVAL;
}
- if (its_in_srat >= MAX_NUMNODES) {
- pr_err("SRAT: ITS affinity exceeding max count[%d]\n",
- MAX_NUMNODES);
- return -EINVAL;
- }
-
node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
@@ -1897,14 +1897,37 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
static void __init acpi_table_parse_srat_its(void)
{
+ int count;
+
+ count = acpi_table_parse_entries(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+ gic_acpi_match_srat_its, 0);
+ if (count <= 0)
+ return;
+
+ its_srat_maps = kmalloc(count * sizeof(struct its_srat_map),
+ GFP_KERNEL);
+ if (!its_srat_maps) {
+ pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
+ return;
+ }
+
acpi_table_parse_entries(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
gic_acpi_parse_srat_its, 0);
}
+
+/* free the its_srat_maps after ITS probing */
+static void __init acpi_its_srat_maps_free(void)
+{
+ kfree(its_srat_maps);
+}
#else
static void __init acpi_table_parse_srat_its(void) { }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
+static void __init acpi_its_srat_maps_free(void) { }
#endif
static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
@@ -1951,6 +1974,7 @@ static void __init its_acpi_probe(void)
acpi_table_parse_srat_its();
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
gic_acpi_parse_madt_its, 0);
+ acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index dbffb7ab6203..984c3ecfd22c 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -353,6 +353,8 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if (static_key_true(&supports_deactivate))
gic_write_eoir(irqnr);
+ else
+ isb();
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
@@ -640,11 +642,16 @@ static void gic_smp_init(void)
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu;
void __iomem *reg;
int enabled;
u64 val;
+ if (force)
+ cpu = cpumask_first(mask_val);
+ else
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
if (cpu >= nr_cpu_ids)
return -EINVAL;
@@ -831,8 +838,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
- for (i = 0; i < nr_irqs; i++)
- gic_irq_domain_map(domain, virq + i, hwirq + i);
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1b1df4f770bd..d3e7c43718b8 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -361,6 +361,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
if (likely(irqnr > 15 && irqnr < 1020)) {
if (static_key_true(&supports_deactivate))
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+ isb();
handle_domain_irq(gic->domain, irqnr, regs);
continue;
}
@@ -401,10 +402,12 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
goto out;
cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
- if (unlikely(gic_irq < 32 || gic_irq > 1020))
+ if (unlikely(gic_irq < 32 || gic_irq > 1020)) {
handle_bad_irq(desc);
- else
+ } else {
+ isb();
generic_handle_irq(cascade_irq);
+ }
out:
chained_irq_exit(chip, desc);
@@ -1027,8 +1030,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
- for (i = 0; i < nr_irqs; i++)
- gic_irq_domain_map(domain, virq + i, hwirq + i);
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 6ab1d3afec02..48ee1bad473f 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -1020,8 +1020,11 @@ static int __init gic_of_init(struct device_node *node,
gic_len = resource_size(&res);
}
- if (mips_cm_present())
+ if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
gic_present = true;
__gic_init(gic_base, gic_len, cpu_vec, 0, node);
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 7b5fd8fb1761..aaca0b3d662e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -44,7 +44,6 @@ struct procdata {
char log_name[15]; /* log filename */
struct log_data *log_head, *log_tail; /* head and tail for queue */
int if_used; /* open count for interface */
- int volatile del_lock; /* lock for delete operations */
unsigned char logtmp[LOG_MAX_LINELEN];
wait_queue_head_t rd_queue;
};
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
{
struct log_data *ib;
struct procdata *pd = card->proclog;
- int i;
unsigned long flags;
if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
else
pd->log_tail->next = ib; /* follows existing messages */
pd->log_tail = ib; /* new tail */
- i = pd->del_lock++; /* get lock state */
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
/* delete old entrys */
- if (!i)
- while (pd->log_head->next) {
- if ((pd->log_head->usage_cnt <= 0) &&
- (pd->log_head->next->usage_cnt <= 0)) {
- ib = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(ib);
- } else
- break;
- } /* pd->log_head->next */
- pd->del_lock--; /* release lock level */
+ while (pd->log_head->next) {
+ if ((pd->log_head->usage_cnt <= 0) &&
+ (pd->log_head->next->usage_cnt <= 0)) {
+ ib = pd->log_head;
+ pd->log_head = pd->log_head->next;
+ kfree(ib);
+ } else {
+ break;
+ }
+ } /* pd->log_head->next */
+
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+
wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
} /* put_log_buffer */
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c
index 78fc5d5e9051..92e6570b1143 100644
--- a/drivers/isdn/mISDN/fsm.c
+++ b/drivers/isdn/mISDN/fsm.c
@@ -26,7 +26,7 @@
#define FSM_TIMER_DEBUG 0
-void
+int
mISDN_FsmNew(struct Fsm *fsm,
struct FsmNode *fnlist, int fncount)
{
@@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm,
fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
fsm->event_count, GFP_KERNEL);
+ if (fsm->jumpmatrix == NULL)
+ return -ENOMEM;
for (i = 0; i < fncount; i++)
if ((fnlist[i].state >= fsm->state_count) ||
@@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm,
} else
fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
+ return 0;
}
EXPORT_SYMBOL(mISDN_FsmNew);
diff --git a/drivers/isdn/mISDN/fsm.h b/drivers/isdn/mISDN/fsm.h
index 928f5be192c1..e1def8490221 100644
--- a/drivers/isdn/mISDN/fsm.h
+++ b/drivers/isdn/mISDN/fsm.h
@@ -55,7 +55,7 @@ struct FsmTimer {
void *arg;
};
-extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
+extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
extern void mISDN_FsmFree(struct Fsm *);
extern int mISDN_FsmEvent(struct FsmInst *, int , void *);
extern void mISDN_FsmChangeState(struct FsmInst *, int);
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index bebc57b72138..3192b0eb3944 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -414,8 +414,7 @@ l1_init(u_int *deb)
l1fsm_s.event_count = L1_EVENT_COUNT;
l1fsm_s.strEvent = strL1Event;
l1fsm_s.strState = strL1SState;
- mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
- return 0;
+ return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
}
void
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 7243a6746f8b..9ff0903a0e89 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = {
int
Isdnl2_Init(u_int *deb)
{
+ int res;
debug = deb;
mISDN_register_Bprotocol(&X75SLP);
l2fsm.state_count = L2_STATE_COUNT;
l2fsm.event_count = L2_EVENT_COUNT;
l2fsm.strEvent = strL2Event;
l2fsm.strState = strL2State;
- mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
- TEIInit(deb);
+ res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
+ if (res)
+ goto error;
+ res = TEIInit(deb);
+ if (res)
+ goto error_fsm;
return 0;
+
+error_fsm:
+ mISDN_FsmFree(&l2fsm);
+error:
+ mISDN_unregister_Bprotocol(&X75SLP);
+ return res;
}
void
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 908127efccf8..12d9e5f4beb1 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev)
int TEIInit(u_int *deb)
{
+ int res;
debug = deb;
teifsmu.state_count = TEI_STATE_COUNT;
teifsmu.event_count = TEI_EVENT_COUNT;
teifsmu.strEvent = strTeiEvent;
teifsmu.strState = strTeiState;
- mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+ res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+ if (res)
+ goto error;
teifsmn.state_count = TEI_STATE_COUNT;
teifsmn.event_count = TEI_EVENT_COUNT;
teifsmn.strEvent = strTeiEvent;
teifsmn.strState = strTeiState;
- mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+ res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+ if (res)
+ goto error_smn;
deactfsm.state_count = DEACT_STATE_COUNT;
deactfsm.event_count = DEACT_EVENT_COUNT;
deactfsm.strEvent = strDeactEvent;
deactfsm.strState = strDeactState;
- mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+ res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+ if (res)
+ goto error_deact;
return 0;
+
+error_deact:
+ mISDN_FsmFree(&teifsmn);
+error_smn:
+ mISDN_FsmFree(&teifsmu);
+error:
+ return res;
}
void TEIFree(void)
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index cbca5e51b975..9b7005e1345e 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -457,10 +457,8 @@ static int __init acpi_pcc_probe(void)
/* Search for PCCT */
status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
- if (ACPI_FAILURE(status) || !pcct_tbl) {
- pr_warn("PCCT header not found.\n");
+ if (ACPI_FAILURE(status) || !pcct_tbl)
return -ENODEV;
- }
count = acpi_table_parse_entries(ACPI_SIG_PCCT,
sizeof(struct acpi_table_pcct),
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0e8ab5bb3575..d24e4b05f5da 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -504,7 +504,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
if (queue_dying) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
- return DM_MAPIO_REQUEUE;
}
return DM_MAPIO_DELAY_REQUEUE;
}
@@ -1458,7 +1457,6 @@ static int noretry_error(blk_status_t error)
case BLK_STS_TARGET:
case BLK_STS_NEXUS:
case BLK_STS_MEDIUM:
- case BLK_STS_RESOURCE:
return 1;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2edbcc2d7d3f..d669fddd9290 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -27,16 +27,6 @@
#define DM_MSG_PREFIX "core"
-#ifdef CONFIG_PRINTK
-/*
- * ratelimit state to be used in DMXXX_LIMIT().
- */
-DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
- DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
-EXPORT_SYMBOL(dm_ratelimit_state);
-#endif
-
/*
* Cookies are numeric values sent with CHANGE and REMOVE
* uevents while resuming, removing or renaming the device.
@@ -1523,7 +1513,7 @@ static void __split_and_process_bio(struct mapped_device *md,
}
/* drop the extra reference count */
- dec_pending(ci.io, error);
+ dec_pending(ci.io, errno_to_blk_status(error));
}
/*-----------------------------------------------------------------
* CRUD END
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c99634612fc4..b01e458d31e9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7996,7 +7996,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
if (mddev->safemode == 1)
mddev->safemode = 0;
/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
- if (mddev->in_sync || !mddev->sync_checkers) {
+ if (mddev->in_sync || mddev->sync_checkers) {
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
@@ -8656,6 +8656,9 @@ void md_check_recovery(struct mddev *mddev)
if (mddev_trylock(mddev)) {
int spares = 0;
+ if (!mddev->external && mddev->safemode == 1)
+ mddev->safemode = 0;
+
if (mddev->ro) {
struct md_rdev *rdev;
if (!mddev->external && mddev->in_sync)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index bfa1e907c472..2dcbafa8e66c 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -236,9 +236,10 @@ struct r5l_io_unit {
bool need_split_bio;
struct bio *split_bio;
- unsigned int has_flush:1; /* include flush request */
- unsigned int has_fua:1; /* include fua request */
- unsigned int has_null_flush:1; /* include empty flush request */
+ unsigned int has_flush:1; /* include flush request */
+ unsigned int has_fua:1; /* include fua request */
+ unsigned int has_null_flush:1; /* include null flush request */
+ unsigned int has_flush_payload:1; /* include flush payload */
/*
* io isn't sent yet, flush/fua request can only be submitted till it's
* the first IO in running_ios list
@@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio)
struct r5l_io_unit *io_deferred;
struct r5l_log *log = io->log;
unsigned long flags;
+ bool has_null_flush;
+ bool has_flush_payload;
if (bio->bi_status)
md_error(log->rdev->mddev, log->rdev);
@@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio)
spin_lock_irqsave(&log->io_list_lock, flags);
__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+
+ /*
+ * if the io doesn't have null_flush or flush payload,
+ * it is not safe to access it after releasing io_list_lock.
+ * Therefore, it is necessary to check the condition with
+ * the lock held.
+ */
+ has_null_flush = io->has_null_flush;
+ has_flush_payload = io->has_flush_payload;
+
if (log->need_cache_flush && !list_empty(&io->stripe_list))
r5l_move_to_end_ios(log);
else
@@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio)
if (log->need_cache_flush)
md_wakeup_thread(log->rdev->mddev->thread);
- if (io->has_null_flush) {
+ /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
+ if (has_null_flush) {
struct bio *bi;
WARN_ON(bio_list_empty(&io->flush_barriers));
while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
bio_endio(bi);
- atomic_dec(&io->pending_stripe);
+ if (atomic_dec_and_test(&io->pending_stripe)) {
+ __r5l_stripe_write_finished(io);
+ return;
+ }
}
}
-
- /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
- if (atomic_read(&io->pending_stripe) == 0)
- __r5l_stripe_write_finished(io);
+ /* decrease pending_stripe for flush payload */
+ if (has_flush_payload)
+ if (atomic_dec_and_test(&io->pending_stripe))
+ __r5l_stripe_write_finished(io);
}
static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
@@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
payload->size = cpu_to_le32(sizeof(__le64));
payload->flush_stripes[0] = cpu_to_le64(sect);
io->meta_offset += meta_size;
+ /* multiple flush payloads count as one pending_stripe */
+ if (!io->has_flush_payload) {
+ io->has_flush_payload = 1;
+ atomic_inc(&io->pending_stripe);
+ }
mutex_unlock(&log->io_mutex);
}
@@ -2540,23 +2562,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
*/
int r5c_journal_mode_set(struct mddev *mddev, int mode)
{
- struct r5conf *conf = mddev->private;
- struct r5l_log *log = conf->log;
-
- if (!log)
- return -ENODEV;
+ struct r5conf *conf;
+ int err;
if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
mode > R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ conf = mddev->private;
+ if (!conf || !conf->log) {
+ mddev_unlock(mddev);
+ return -ENODEV;
+ }
+
if (raid5_calc_degraded(conf) > 0 &&
- mode == R5C_JOURNAL_MODE_WRITE_BACK)
+ mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+ mddev_unlock(mddev);
return -EINVAL;
+ }
mddev_suspend(mddev);
conf->log->r5c_journal_mode = mode;
mddev_resume(mddev);
+ mddev_unlock(mddev);
pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
mdname(mddev), mode, r5c_journal_mode_str[mode]);
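The flush-payload accounting above is a latch-once reference: no matter how many flush payloads are appended to an io_unit, has_flush_payload is set once and pending_stripe is bumped once, so r5l_log_endio() drops exactly one matching reference. A minimal stand-alone C sketch of that latch-once idiom, with illustrative names rather than the kernel structures:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct io_unit {
	atomic_int pending;        /* outstanding references on the io */
	bool has_flush_payload;    /* latched when the first payload lands */
};

static void append_flush_payload(struct io_unit *io)
{
	/* many payloads, but only the first one takes a reference */
	if (!io->has_flush_payload) {
		io->has_flush_payload = true;
		atomic_fetch_add(&io->pending, 1);
	}
}

int main(void)
{
	struct io_unit io = { .has_flush_payload = false };

	atomic_init(&io.pending, 1);        /* reference held by the writer */
	for (int i = 0; i < 8; i++)
		append_flush_payload(&io);

	/* the end-io path still has only one extra reference to drop */
	printf("pending after appends = %d\n", atomic_load(&io.pending)); /* 2 */
	return 0;
}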
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 99e644cda4d1..ebf69ff48ae2 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -72,7 +72,7 @@ struct atmel_smc_timing_xlate {
{ .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos}
#define ATMEL_SMC_CYCLE_XLATE(nm, pos) \
- { .name = nm, .converter = atmel_smc_cs_conf_set_setup, .shift = pos}
+ { .name = nm, .converter = atmel_smc_cs_conf_set_cycle, .shift = pos}
static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid,
struct atmel_ebi_dev_config *conf)
@@ -120,12 +120,14 @@ static int atmel_ebi_xslate_smc_timings(struct atmel_ebi_dev *ebid,
if (!ret) {
required = true;
ncycles = DIV_ROUND_UP(val, clk_period_ns);
- if (ncycles > ATMEL_SMC_MODE_TDF_MAX ||
- ncycles < ATMEL_SMC_MODE_TDF_MIN) {
+ if (ncycles > ATMEL_SMC_MODE_TDF_MAX) {
ret = -EINVAL;
goto out;
}
+ if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
+ ncycles = ATMEL_SMC_MODE_TDF_MIN;
+
smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles);
}
@@ -263,7 +265,7 @@ static int atmel_ebi_xslate_smc_config(struct atmel_ebi_dev *ebid,
}
ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf);
- if (ret)
+ if (ret < 0)
return -EINVAL;
if ((ret > 0 && !required) || (!ret && required)) {
diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c
index 954cf0f66a31..20cc0ea470fa 100644
--- a/drivers/mfd/atmel-smc.c
+++ b/drivers/mfd/atmel-smc.c
@@ -206,7 +206,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_pulse);
* parameter
*
* This function encodes the @ncycles value as described in the datasheet
- * (section "SMC Pulse Register"), and then stores the result in the
+ * (section "SMC Cycle Register"), and then stores the result in the
* @conf->setup field at @shift position.
*
* Returns -EINVAL if @shift is invalid, -ERANGE if @ncycles does not fit in
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index fbe0f245ce8e..fe1811523e4a 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -645,6 +645,9 @@ static const struct regmap_range da9062_aa_readable_ranges[] = {
.range_min = DA9062AA_VLDO1_B,
.range_max = DA9062AA_VLDO4_B,
}, {
+ .range_min = DA9062AA_BBAT_CONT,
+ .range_max = DA9062AA_BBAT_CONT,
+ }, {
.range_min = DA9062AA_INTERFACE,
.range_max = DA9062AA_CONFIG_E,
}, {
@@ -721,6 +724,9 @@ static const struct regmap_range da9062_aa_writeable_ranges[] = {
.range_min = DA9062AA_VLDO1_B,
.range_max = DA9062AA_VLDO4_B,
}, {
+ .range_min = DA9062AA_BBAT_CONT,
+ .range_max = DA9062AA_BBAT_CONT,
+ }, {
.range_min = DA9062AA_GP_ID_0,
.range_max = DA9062AA_GP_ID_19,
},
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 8621a198a2ce..bac33311f55a 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -216,6 +216,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
/*
+ * MEI requires the device to resume from runtime suspend mode
+ * in order to perform the link reset flow upon system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
+ /*
* For HW that is not wake-able, the runtime pm framework
* can't be used at the pci device level.
* Use domain runtime pm callbacks instead.
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index f811cd524468..e38a5f144373 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -138,6 +138,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
/*
+ * MEI requires the device to resume from runtime suspend mode
+ * in order to perform the link reset flow upon system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
+ /*
* For HW that is not wake-able, the runtime pm framework
* can't be used at the pci device level.
* Use domain runtime pm callbacks instead.
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 64d5760d069a..63d6246d6dff 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -200,16 +200,6 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn,
schedule_work(&scif_info.misc_work);
}
-static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct scif_mmu_notif *mmn;
-
- mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
- scif_rma_destroy_tcw(mmn, address, PAGE_SIZE);
-}
-
static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
@@ -235,7 +225,6 @@ static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
.release = scif_mmu_notifier_release,
.clear_flush_young = NULL,
- .invalidate_page = scif_mmu_notifier_invalidate_page,
.invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
.invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index e936d43895d2..9918eda0e05f 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -247,17 +247,6 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end);
}
-static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long address)
-{
- struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
- ms_notifier);
-
- STAT(mmu_invalidate_page);
- gru_flush_tlb_range(gms, address, PAGE_SIZE);
- gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address);
-}
-
static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
@@ -269,7 +258,6 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops gru_mmuops = {
- .invalidate_page = gru_invalidate_page,
.invalidate_range_start = gru_invalidate_range_start,
.invalidate_range_end = gru_invalidate_range_end,
.release = gru_release,
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index e5938c791330..8bd7aba811e9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1213,7 +1213,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
mq_rq->drv_op_result = ret;
- blk_end_request_all(req, ret);
+ blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -1371,12 +1371,46 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
-static bool mmc_blk_has_cmd_err(struct mmc_command *cmd)
+static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
- if (!cmd->error && cmd->resp[0] & CMD_ERRORS)
- cmd->error = -EIO;
+ u32 val;
+
+ /*
+ * Per the SD specification (physical layer version 4.10)[1],
+ * section 4.3.3, it explicitly states that "When the last
+ * block of user area is read using CMD18, the host should
+ * ignore OUT_OF_RANGE error that may occur even the sequence
+ * is correct". And JESD84-B51 for eMMC also has a similar
+ * statement on section 6.8.3.
+ *
+ * Multiple block read/write can be done either by the predefined
+ * method, namely CMD23, or in open-ending mode. For open-ending mode,
+ * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
+ *
+ * However the spec[1] doesn't tell us whether we should also
+ * ignore that for the predefined method. But per the spec[1], section
+ * 4.15 Set Block Count Command, it says "If illegal block count
+ * is set, out of range error will be indicated during read/write
+ * operation (For example, data transfer is stopped at user area
+ * boundary)." In other words, we can expect an out of range error
+ * in the response for the following CMD18/25. And if the argument of
+ * CMD23 plus the argument of CMD18/25 exceeds the max number of blocks,
+ * we can also expect to get -ETIMEDOUT or some other error from
+ * the host drivers due to a missing data response (for write) or
+ * missing data (for read), as the card will stop the data transfer
+ * by itself per the spec. So we only need to check R1_OUT_OF_RANGE
+ * for open-ending mode.
+ */
- return cmd->error;
+ if (!brq->stop.error) {
+ bool oor_with_open_end;
+ /* If there is no error yet, check R1 response */
+
+ val = brq->stop.resp[0] & CMD_ERRORS;
+ oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;
+
+ if (val && !oor_with_open_end)
+ brq->stop.error = -EIO;
+ }
}
static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
@@ -1400,8 +1434,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
* stop.error indicates a problem with the stop command. Data
* may have been transferred, or may still be transferring.
*/
- if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) ||
- brq->data.error) {
+
+ mmc_blk_eval_resp_error(brq);
+
+ if (brq->sbc.error || brq->cmd.error ||
+ brq->stop.error || brq->data.error) {
switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
case ERR_RETRY:
return MMC_BLK_RETRY;
@@ -1681,9 +1718,9 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
if (err)
req_pending = old_req_pending;
else
- req_pending = blk_end_request(req, 0, blocks << 9);
+ req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
} else {
- req_pending = blk_end_request(req, 0, brq->data.bytes_xfered);
+ req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
}
return req_pending;
}
@@ -2170,7 +2207,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
+ spin_lock_irq(md->queue.queue->queue_lock);
queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
+ spin_unlock_irq(md->queue.queue->queue_lock);
blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
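The open-ending check in mmc_blk_eval_resp_error() above reduces to a small predicate: an R1 OUT_OF_RANGE bit is forgiven only when no CMD23 (sbc) was issued. A tiny host-side sketch of that predicate follows; the bit positions are illustrative placeholders, not the driver's actual CMD_ERRORS definition.

#include <stdbool.h>
#include <stdio.h>

#define R1_OUT_OF_RANGE (1u << 31)                     /* illustrative */
#define CMD_ERRORS      (R1_OUT_OF_RANGE | (1u << 19)) /* illustrative */

static bool stop_resp_is_error(unsigned int resp, bool used_cmd23)
{
	unsigned int val = resp & CMD_ERRORS;
	bool oor_with_open_end = (val & R1_OUT_OF_RANGE) && !used_cmd23;

	return val && !oor_with_open_end;
}

int main(void)
{
	/* open-ended read running off the end of the card: tolerated */
	printf("%d\n", stop_resp_is_error(R1_OUT_OF_RANGE, false)); /* 0 */
	/* same response with CMD23 in use: a real error */
	printf("%d\n", stop_resp_is_error(R1_OUT_OF_RANGE, true));  /* 1 */
	return 0;
}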
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 4ffea14b7eb6..2bae69e39544 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1289,7 +1289,7 @@ out_err:
static int mmc_select_hs400es(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- int err = 0;
+ int err = -EINVAL;
u8 val;
if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 04ff3c97a535..2ab4788d021f 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2086,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->max_seg_size = mmc->max_req_size;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
+ MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps |= mmc_pdata(host)->caps;
if (mmc->caps & MMC_CAP_8_BIT_DATA)
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index bc1781bb070b..c580af05b033 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -210,8 +210,27 @@ static void xenon_set_uhs_signaling(struct sdhci_host *host,
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
+static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+ u8 pwr = host->pwr;
+
+ sdhci_set_power_noreg(host, mode, vdd);
+
+ if (host->pwr == pwr)
+ return;
+
+ if (host->pwr == 0)
+ vdd = 0;
+
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
static const struct sdhci_ops sdhci_xenon_ops = {
.set_clock = sdhci_set_clock,
+ .set_power = xenon_set_power,
.set_bus_width = sdhci_set_bus_width,
.reset = xenon_reset,
.set_uhs_signaling = xenon_set_uhs_signaling,
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f336a9b85576..9ec8f033ac5f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->writesect(dev, block, buf))
return BLK_STS_IOERR;
+ return BLK_STS_OK;
default:
return BLK_STS_IOERR;
}
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index d922a88e407f..ceec21bd30c4 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
* tRC < 30ns implies EDO mode. This controller does not support this
* mode.
*/
- if (conf->timings.sdr.tRC_min < 30)
+ if (conf->timings.sdr.tRC_min < 30000)
return -ENOTSUPP;
atmel_smc_cs_conf_init(smcconf);
@@ -1364,7 +1364,18 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
ret = atmel_smc_cs_conf_set_timing(smcconf,
ATMEL_HSMC_TIMINGS_TADL_SHIFT,
ncycles);
- if (ret)
+ /*
+ * Version 4 of the ONFI spec mandates that tADL be at least 400
+ * nanoseconds, but, depending on the master clock rate, 400 ns may not
+ * fit in the tADL field of the SMC reg. We need to relax the check and
+ * accept the -ERANGE return code.
+ *
+ * Note that previous versions of the ONFI spec had a lower tADL_min
+ * (100 or 200 ns). It's not clear why this timing constraint got
+ * increased but it seems most NANDs are fine with values lower than
+ * 400ns, so we should be safe.
+ */
+ if (ret && ret != -ERANGE)
return ret;
ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
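To see why the -ERANGE tolerance above can trigger, here is rough arithmetic for the tADL conversion; the 133 MHz master clock (a period of roughly 7519 ps) is an assumed example value, not something stated in the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long tadl_min_ps = 400000;  /* ONFI 4.x tADL minimum, in ps  */
	unsigned long mckperiodps = 7519;    /* assumed ~133 MHz master clock */

	/* 54 cycles, which may not fit in the SMC's TADL field */
	printf("ncycles = %lu\n", DIV_ROUND_UP(tadl_min_ps, mckperiodps));
	return 0;
}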
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 55a8ee5306ea..8c210a5776bc 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
*/
struct platform_device *pdev = to_platform_device(userdev);
const struct atmel_pmecc_caps *caps;
+ const struct of_device_id *match;
/* No PMECC engine available. */
if (!of_property_read_bool(userdev->of_node,
@@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
caps = &at91sam9g45_caps;
- /*
- * Try to find the NFC subnode and extract the associated caps
- * from there.
- */
- np = of_find_compatible_node(userdev->of_node, NULL,
- "atmel,sama5d3-nfc");
- if (np) {
- const struct of_device_id *match;
-
- match = of_match_node(atmel_pmecc_legacy_match, np);
- if (match && match->data)
- caps = match->data;
-
- of_node_put(np);
- }
+ /* Find the caps associated with the NAND dev node. */
+ match = of_match_node(atmel_pmecc_legacy_match,
+ userdev->of_node);
+ if (match && match->data)
+ caps = match->data;
pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 5fa5ddc94834..c6c18b82f8f4 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
if (!section) {
oobregion->offset = 0;
- oobregion->length = 4;
+ if (mtd->oobsize == 16)
+ oobregion->length = 4;
+ else
+ oobregion->length = 3;
} else {
+ if (mtd->oobsize == 8)
+ return -ERANGE;
+
oobregion->offset = 6;
oobregion->length = ecc->total - 4;
}
@@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
* Ensure the timing mode has been changed on the chip side
* before changing timings on the controller side.
*/
- if (chip->onfi_version) {
+ if (chip->onfi_version &&
+ (le16_to_cpu(chip->onfi_params.opt_cmd) &
+ ONFI_OPT_CMD_SET_GET_FEATURES)) {
u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
chip->onfi_timing_mode_default,
};
@@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
* @buf: the data to write
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
- * @cached: cached programming
* @raw: use _raw version of write_page
*/
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index f06312df3669..7e36d7d13c26 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip,
struct nand_sdr_timings *timings = &iface->timings.sdr;
/* microseconds -> picoseconds */
- timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog);
- timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers);
- timings->tR_max = 1000000UL * le16_to_cpu(params->t_r);
+ timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog);
+ timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers);
+ timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r);
/* nanoseconds -> picoseconds */
timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
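The UL to ULL change above matters on 32-bit platforms, where unsigned long is 32 bits wide: a worst-case 16-bit t_prog of 65535 us converts to 65,535,000,000 ps, well above 2^32 - 1, so a 32-bit multiply would silently wrap. A quick host-side illustration, where the 32-bit cast emulates a 32-bit unsigned long:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t t_prog_us = 65535;  /* worst case for a 16-bit ONFI field */

	/* emulate a 32-bit unsigned long multiply: wraps */
	uint32_t wrapped = (uint32_t)1000000 * t_prog_us;
	/* force a 64-bit multiply, as 1000000ULL does: exact */
	uint64_t exact = 1000000ULL * t_prog_us;

	printf("wrapped = %u\n", wrapped);                     /* 1110490560  */
	printf("exact   = %llu\n", (unsigned long long)exact); /* 65535000000 */
	return 0;
}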
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 03a0d057bf2f..e4211c3cc49b 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2373,6 +2373,7 @@ static int __init ns_init_module(void)
return 0;
err_exit:
+ nandsim_debugfs_remove(nand);
free_nandsim(nand);
nand_release(nsmtd);
for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index d0b6f8f9f297..6abd142b1324 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
*/
chip->clk_rate = NSEC_PER_SEC / min_clk_period;
real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+ if (real_clk_rate <= 0) {
+ dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
+ return -EINVAL;
+ }
/*
* ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9bee6c1c70cc..fc63992ab0e0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- if (bond_update_speed_duplex(new_slave))
+ if (bond_update_speed_duplex(new_slave) &&
+ bond_needs_speed_duplex(bond))
new_slave->link = BOND_LINK_DOWN;
new_slave->last_rx = jiffies -
@@ -2140,11 +2141,13 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- if (bond_update_speed_duplex(slave)) {
+ if (bond_update_speed_duplex(slave) &&
+ bond_needs_speed_duplex(bond)) {
slave->link = BOND_LINK_DOWN;
- netdev_warn(bond->dev,
- "failed to get link speed/duplex for %s\n",
- slave->dev->name);
+ if (net_ratelimit())
+ netdev_warn(bond->dev,
+ "failed to get link speed/duplex for %s\n",
+ slave->dev->name);
continue;
}
bond_set_slave_link_state(slave, BOND_LINK_UP,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 648f91b58d1e..9b6ce7c3f6c3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1048,6 +1048,7 @@ struct bcm_sf2_of_data {
u32 type;
const u16 *reg_offsets;
unsigned int core_reg_align;
+ unsigned int num_cfp_rules;
};
/* Register offsets for the SWITCH_REG_* block */
@@ -1071,6 +1072,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
.type = BCM7445_DEVICE_ID,
.core_reg_align = 0,
.reg_offsets = bcm_sf2_7445_reg_offsets,
+ .num_cfp_rules = 256,
};
static const u16 bcm_sf2_7278_reg_offsets[] = {
@@ -1093,6 +1095,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
.type = BCM7278_DEVICE_ID,
.core_reg_align = 1,
.reg_offsets = bcm_sf2_7278_reg_offsets,
+ .num_cfp_rules = 128,
};
static const struct of_device_id bcm_sf2_of_match[] = {
@@ -1149,6 +1152,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->type = data->type;
priv->reg_offsets = data->reg_offsets;
priv->core_reg_align = data->core_reg_align;
+ priv->num_cfp_rules = data->num_cfp_rules;
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 7d3030e04f11..7f9125eef3df 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -72,6 +72,7 @@ struct bcm_sf2_priv {
u32 type;
const u16 *reg_offsets;
unsigned int core_reg_align;
+ unsigned int num_cfp_rules;
/* spinlock protecting access to the indirect registers */
spinlock_t indir_lock;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 2fb32d67065f..8a1da7e67707 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
{
u32 reg;
- WARN_ON(addr >= CFP_NUM_RULES);
+ WARN_ON(addr >= priv->num_cfp_rules);
reg = core_readl(priv, CORE_CFP_ACC);
reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
@@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
/* Entry #0 is reserved */
- return CFP_NUM_RULES - 1;
+ return priv->num_cfp_rules - 1;
}
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
@@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
if (!(reg & OP_STR_DONE))
break;
- } while (index < CFP_NUM_RULES);
+ } while (index < priv->num_cfp_rules);
/* Put the TCAM size here */
nfc->data = bcm_sf2_cfp_rule_size(priv);
@@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
case ETHTOOL_GRXCLSRLCNT:
/* Subtract the default, unusable rule */
nfc->rule_cnt = bitmap_weight(priv->cfp.used,
- CFP_NUM_RULES) - 1;
+ priv->num_cfp_rules) - 1;
/* We support specifying rule locations */
nfc->data |= RX_CLS_LOC_SPECIAL;
break;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1e46418a3b74..264b281eb86b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
* all finished.
*/
mt7623_pad_clk_setup(ds);
+ } else {
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
+ u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_SPEED_100;
+ break;
+ };
+
+ if (phydev->link)
+ mcr |= PMCR_FORCE_LNK;
+
+ if (phydev->duplex) {
+ mcr |= PMCR_FORCE_FDX;
+
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= PMCR_TX_FC_EN;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= PMCR_RX_FC_EN;
+ }
+ mt7530_write(priv, MT7530_PMCR_P(port), mcr);
}
}
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index b83d76b99802..74db9822eb40 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -151,6 +151,7 @@ enum mt7530_stp_state {
#define PMCR_TX_FC_EN BIT(5)
#define PMCR_RX_FC_EN BIT(4)
#define PMCR_FORCE_SPEED_1000 BIT(3)
+#define PMCR_FORCE_SPEED_100 BIT(2)
#define PMCR_FORCE_FDX BIT(1)
#define PMCR_FORCE_LNK BIT(0)
#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 86058a9f3417..6e253d913fe2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1661,21 +1661,21 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
return 0;
}
-static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
+static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
int ret;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
- return 0;
+ return;
if (!IS_ENABLED(CONFIG_MDIO_XGENE))
- return 0;
+ return;
ret = xgene_enet_phy_connect(pdata->ndev);
if (!ret)
pdata->mdio_driver = true;
- return 0;
+ return;
}
static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
@@ -1779,15 +1779,11 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
- ret = xgene_enet_check_phy_handle(pdata);
- if (ret)
- return ret;
-
xgene_enet_gpiod_get(pdata);
- if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
- pdata->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pdata->clk)) {
+ pdata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pdata->clk)) {
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
/* Abort if the clock is defined but couldn't be
* retrieved. Always abort if the clock is missing on
* a DT system as the driver can't cope with this case.
@@ -2097,9 +2093,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}
+ xgene_enet_check_phy_handle(pdata);
+
ret = xgene_enet_init_hw(pdata);
if (ret)
- goto err;
+ goto err2;
link_state = pdata->mac_ops->link_state;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -2117,29 +2115,30 @@ static int xgene_enet_probe(struct platform_device *pdev)
spin_lock_init(&pdata->stats_lock);
ret = xgene_extd_stats_init(pdata);
if (ret)
- goto err2;
+ goto err1;
xgene_enet_napi_add(pdata);
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
- goto err2;
+ goto err1;
}
return 0;
-err2:
+err1:
/*
* If necessary, free_netdev() will call netif_napi_del() and undo
* the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
*/
+ xgene_enet_delete_desc_rings(pdata);
+
+err2:
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
-err1:
- xgene_enet_delete_desc_rings(pdata);
err:
free_netdev(ndev);
return ret;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index fce0fd3f23ff..bf9b3f020e10 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -105,8 +105,7 @@ struct aq_hw_ops {
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
- int (*hw_get_link_status)(struct aq_hw_s *self,
- struct aq_hw_link_status_s *link_status);
+ int (*hw_get_link_status)(struct aq_hw_s *self);
int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 9ee1c5016784..6ac9e2602d6d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
else
cfg->vecs = 1U;
+ cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+
cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
@@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param)
struct net_device *ndev = aq_nic_get_ndev(self);
int err = 0;
unsigned int i = 0U;
- struct aq_hw_link_status_s link_status;
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
- err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
+ err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
if (err < 0)
goto err_exit;
- self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
- self->aq_nic_cfg.is_interrupt_moderation);
-
- if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
- if (link_status.mbps) {
- aq_utils_obj_set(&self->header.flags,
- AQ_NIC_FLAG_STARTED);
- aq_utils_obj_clear(&self->header.flags,
- AQ_NIC_LINK_DOWN);
- netif_carrier_on(self->ndev);
- } else {
- netif_carrier_off(self->ndev);
- aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
- }
+ self->link_status = self->aq_hw->aq_link_status;
- self->link_status = link_status;
+ self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+ self->aq_nic_cfg.is_interrupt_moderation);
+
+ if (self->link_status.mbps) {
+ aq_utils_obj_set(&self->header.flags,
+ AQ_NIC_FLAG_STARTED);
+ aq_utils_obj_clear(&self->header.flags,
+ AQ_NIC_LINK_DOWN);
+ netif_carrier_on(self->ndev);
+ } else {
+ netif_carrier_off(self->ndev);
+ aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
}
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
@@ -597,14 +596,11 @@ exit:
}
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
-__releases(&ring->lock)
-__acquires(&ring->lock)
{
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
unsigned int tc = 0U;
- unsigned int trys = AQ_CFG_LOCK_TRYS;
int err = NETDEV_TX_OK;
bool is_nic_in_bad_state;
@@ -628,36 +624,21 @@ __acquires(&ring->lock)
goto err_exit;
}
- do {
- if (spin_trylock(&ring->header.lock)) {
- frags = aq_nic_map_skb(self, skb, ring);
-
- if (likely(frags)) {
- err = self->aq_hw_ops.hw_ring_tx_xmit(
- self->aq_hw,
- ring, frags);
- if (err >= 0) {
- if (aq_ring_avail_dx(ring) <
- AQ_CFG_SKB_FRAGS_MAX + 1)
- aq_nic_ndev_queue_stop(
- self,
- ring->idx);
-
- ++ring->stats.tx.packets;
- ring->stats.tx.bytes += skb->len;
- }
- } else {
- err = NETDEV_TX_BUSY;
- }
+ frags = aq_nic_map_skb(self, skb, ring);
- spin_unlock(&ring->header.lock);
- break;
- }
- } while (--trys);
+ if (likely(frags)) {
+ err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
+ ring,
+ frags);
+ if (err >= 0) {
+ if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
+ aq_nic_ndev_queue_stop(self, ring->idx);
- if (!trys) {
+ ++ring->stats.tx.packets;
+ ring->stats.tx.bytes += skb->len;
+ }
+ } else {
err = NETDEV_TX_BUSY;
- goto err_exit;
}
err_exit:
@@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
netdev_for_each_mc_addr(ha, ndev) {
ether_addr_copy(self->mc_list.ar[i++], ha->addr);
++self->mc_list.count;
+
+ if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
+ break;
}
- return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
+ /* The number of filters is too big: atlantic does not support this.
+ * Fall back to the all-multicast filter instead.
+ * With this we disable all UC filters and set up an "all pass"
+ * multicast mask.
+ */
+ self->packet_filter |= IFF_ALLMULTI;
+ self->aq_hw->aq_nic_cfg->mc_list_count = 0;
+ return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+ self->packet_filter);
+ } else {
+ return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
+ }
}
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 9a0817938eca..ec5579fb8268 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self)
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
- spin_lock_init(&self->header.lock);
return 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index f6012b34abe6..e12bcdfb874a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -17,7 +17,6 @@
#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
struct aq_obj_s {
- spinlock_t lock; /* spinlock for nic/rings processing */
atomic_t flags;
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ad5b4d4dac7f..fee446af748f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -34,8 +34,6 @@ struct aq_vec_s {
#define AQ_VEC_RX_ID 1
static int aq_vec_poll(struct napi_struct *napi, int budget)
-__releases(&self->lock)
-__acquires(&self->lock)
{
struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
struct aq_ring_s *ring = NULL;
@@ -47,7 +45,7 @@ __acquires(&self->lock)
if (!self) {
err = -EINVAL;
- } else if (spin_trylock(&self->header.lock)) {
+ } else {
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
if (self->aq_hw_ops->hw_ring_tx_head_update) {
@@ -105,11 +103,8 @@ __acquires(&self->lock)
self->aq_hw_ops->hw_irq_enable(self->aq_hw,
1U << self->aq_ring_param.vec_idx);
}
-
-err_exit:
- spin_unlock(&self->header.lock);
}
-
+err_exit:
return work_done;
}
@@ -185,8 +180,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- spin_lock_init(&self->header.lock);
-
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index faeb4935ef3e..c5a02df7a48b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+
+ /* Checksum offload workaround for small packets */
+ if (rxd_wb->pkt_len <= 60) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
+ }
}
is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 1bceb7358e5c..21784cc39dab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
+
+ /* Checksum offload workaround for small packets */
+ if (rxd_wb->pkt_len <= 60) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
+ }
}
is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 8d6d8f5804da..4f5ec9a0fbfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
aq_hw_read_reg(self, 0x18U));
+
+ if (err < 0)
+ pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
+ AQ_CFG_DRV_NAME,
+ aq_hw_caps->fw_ver_expected,
+ aq_hw_read_reg(self, 0x18U));
return err;
}
@@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
err_exit:;
}
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
- struct aq_hw_link_status_s *link_status)
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
if (!link_speed_mask) {
link_status->mbps = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index a66aee51ab5b..e0360a6b2202 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
enum hal_atl_utils_fw_state_e state);
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
- struct aq_hw_link_status_s *link_status);
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f411936b744c..a1125d10c825 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
spin_lock_init(&bp->lock);
+ u64_stats_init(&bp->hw_stats.syncp);
bp->rx_pending = B44_DEF_RX_RING_PENDING;
bp->tx_pending = B44_DEF_TX_RING_PENDING;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5333601f855f..c28fa5a8734c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
p = (char *)&dev->stats;
else
p = (char *)priv;
+
+ if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
+ continue;
+
p += s->stat_offset;
data[j] = *(unsigned long *)p;
j++;
@@ -593,7 +597,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
- dev_kfree_skb_any(cb->skb);
+ dev_consume_skb_any(cb->skb);
cb->skb = NULL;
dma_unmap_addr_set(cb, dma_addr, 0);
}
@@ -1342,6 +1346,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
if (!ring->cbs) {
+ dma_free_coherent(kdev, sizeof(struct dma_desc),
+ ring->desc_cpu, ring->desc_dma);
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e7c8539cbddf..f20b3d2a4c23 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4647,7 +4647,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->port_id = le16_to_cpu(resp->port_id);
bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
- memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -4687,16 +4686,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
- mutex_unlock(&bp->hwrm_cmd_lock);
-
- if (is_valid_ether_addr(vf->mac_addr)) {
- /* overwrite netdev dev_adr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
- } else {
- eth_hw_addr_random(bp->dev);
- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
- }
- return rc;
#endif
}
@@ -7152,6 +7141,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
}
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings;
@@ -7661,6 +7651,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp)
bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}
+static int bnxt_init_mac_addr(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (BNXT_PF(bp)) {
+ memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ /* overwrite netdev dev_addr with admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ eth_hw_addr_random(bp->dev);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ }
+#endif
+ }
+ return rc;
+}
+
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -7789,7 +7801,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -1;
goto init_err_pci_clean;
}
-
+ rc = bnxt_init_mac_addr(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to initialize mac address.\n");
+ rc = -EADDRNOTAVAIL;
+ goto init_err_pci_clean;
+ }
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 77da75a55c02..997e10e8b863 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
+ if (ulp->msix_requested)
+ edev->en_ops->bnxt_free_msix(edev, ulp_id);
}
if (ulp->max_async_event_id)
bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a981c4ee9d72..fea3f9a5fb2d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1360,7 +1360,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
if (skb) {
pkts_compl++;
bytes_compl += GENET_CB(skb)->bytes_sent;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
txbds_processed++;
@@ -1875,7 +1875,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
cb = ring->cbs + i;
skb = bcmgenet_rx_refill(priv, cb);
if (skb)
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
if (!cb->skb)
return -ENOMEM;
}
@@ -1894,7 +1894,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
if (skb)
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ef4be781fd05..09ea62ee96d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -529,6 +529,7 @@ enum { /* adapter flags */
USING_SOFT_PARAMS = (1 << 6),
MASTER_PF = (1 << 7),
FW_OFLD_CONN = (1 << 9),
+ ROOT_NO_RELAXED_ORDERING = (1 << 10),
};
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e403fa18f1b1..33bb8678833a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4654,11 +4654,6 @@ static void print_port_info(const struct net_device *dev)
dev->name, adap->params.vpd.id, adap->name, buf);
}
-static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
-{
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
-}
-
/*
* Free the following resources:
* - memory used for tables
@@ -4908,7 +4903,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_enable_pcie_error_reporting(pdev);
- enable_pcie_relaxed_ordering(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
@@ -4947,6 +4941,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+ /* If possible, we use the PCIe Relaxed Ordering Attribute to deliver
+ * Ingress Packet Data to Free List Buffers in order to allow for
+ * chipset performance optimizations between the Root Complex and
+ * Memory Controllers. (Messages to the associated Ingress Queue
+ * notifying new Packet Placement in the Free List Buffers will be
+ * sent without the Relaxed Ordering Attribute, thus guaranteeing that
+ * all preceding PCIe Transaction Layer Packets will be processed
+ * first.) But some Root Complexes have various issues with Upstream
+ * Transaction Layer Packets that have the Relaxed Ordering Attribute
+ * set. PCIe devices under such Root Complexes will have the Relaxed
+ * Ordering bit cleared in their configuration space, so we check our
+ * own PCIe configuration space to see if it's flagged with advice
+ * against using Relaxed Ordering.
+ */
+ if (!pcie_relaxed_ordering_enabled(pdev))
+ adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ede12209f20b..4ef68f69b58c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2719,6 +2719,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct fw_iq_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
+ int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
/* Size needs to be multiple of 16, including status entry. */
iq->size = roundup(iq->size, 16);
@@ -2772,8 +2773,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
- FW_IQ_CMD_FL0FETCHRO_F |
- FW_IQ_CMD_FL0DATARO_F |
+ FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+ FW_IQ_CMD_FL0DATARO_V(relaxed) |
FW_IQ_CMD_FL0PADEN_F);
if (cong >= 0)
c.iqns_to_fl0congen |=
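The Relaxed Ordering handling above follows a common pattern: probe latches a capability decision into the adapter flags word, and the queue-allocation path later turns the flag into a 0/1 value for the firmware command. A compact stand-alone sketch of that pattern; the names and the stand-in predicate are illustrative, not the cxgb4 API:

#include <stdbool.h>
#include <stdio.h>

#define ROOT_NO_RELAXED_ORDERING (1u << 10)

struct adapter {
	unsigned int flags;
};

/* stand-in for pcie_relaxed_ordering_enabled(pdev) */
static bool relaxed_ordering_ok(bool root_complex_ok)
{
	return root_complex_ok;
}

static void probe(struct adapter *adap, bool root_complex_ok)
{
	if (!relaxed_ordering_ok(root_complex_ok))
		adap->flags |= ROOT_NO_RELAXED_ORDERING;
}

static int alloc_rxq_relaxed(const struct adapter *adap)
{
	/* 1: request relaxed ordering on free-list fetches, 0: don't */
	return !(adap->flags & ROOT_NO_RELAXED_ORDERING);
}

int main(void)
{
	struct adapter good = { 0 }, quirky = { 0 };

	probe(&good, true);
	probe(&quirky, false);
	printf("good=%d quirky=%d\n",
	       alloc_rxq_relaxed(&good), alloc_rxq_relaxed(&quirky)); /* 1 0 */
	return 0;
}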
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 82bf7aac6cdb..0293b41171a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
- t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+ t4_record_mbox(adap, cmd, size, access, ret);
return ret;
}
/* Copy in the new mailbox command and send it on its way ... */
- t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
+ t4_record_mbox(adap, cmd, size, access, 0);
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
@@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
}
ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
- t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+ t4_record_mbox(adap, cmd, size, access, ret);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 109bc630408b..08c6ddb84a04 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -408,6 +408,7 @@ enum { /* adapter flags */
USING_MSI = (1UL << 1),
USING_MSIX = (1UL << 2),
QUEUES_BOUND = (1UL << 3),
+ ROOT_NO_RELAXED_ORDERING = (1UL << 4),
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ac7a150c54e9..2b85b874fd0d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2888,6 +2888,24 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
*/
adapter->name = pci_name(pdev);
adapter->msg_enable = DFLT_MSG_ENABLE;
+
+ /* If possible, we use the PCIe Relaxed Ordering Attribute to deliver
+ * Ingress Packet Data to Free List Buffers in order to allow for
+ * chipset performance optimizations between the Root Complex and
+ * Memory Controllers. (Messages to the associated Ingress Queue
+ * notifying new Packet Placement in the Free List Buffers will be
+ * sent without the Relaxed Ordering Attribute, thus guaranteeing that
+ * all preceding PCIe Transaction Layer Packets will be processed
+ * first.) But some Root Complexes have various issues with Upstream
+ * Transaction Layer Packets that have the Relaxed Ordering Attribute
+ * set. PCIe devices under such Root Complexes will have the Relaxed
+ * Ordering bit cleared in their configuration space, so we check our
+ * own PCIe configuration space to see if it's flagged with advice
+ * against using Relaxed Ordering.
+ */
+ if (!pcie_relaxed_ordering_enabled(pdev))
+ adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
err = adap_init0(adapter);
if (err)
goto err_unmap_bar;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index e37dde2ba97f..05498e7f2840 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2205,6 +2205,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
struct port_info *pi = netdev_priv(dev);
struct fw_iq_cmd cmd, rpl;
int ret, iqandst, flsz = 0;
+ int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING);
/*
* If we're using MSI interrupts and we're not initializing the
@@ -2300,6 +2301,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cpu_to_be32(
FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+ FW_IQ_CMD_FL0DATARO_V(relaxed) |
FW_IQ_CMD_FL0PADEN_F);
/* In T6, for egress queue type FL there is internal overhead
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 34dae51effd4..59da7ac3c108 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1863,7 +1863,6 @@ err_setup_mdio:
err_ioremap:
release_resource(priv->res);
err_req_mem:
- netif_napi_del(&priv->napi);
free_netdev(netdev);
err_alloc_etherdev:
return err;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 6e67d22fd0d5..1c7da16ad0ff 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
+ pdev->dev.of_node = node;
+ pdev->dev.parent = priv->dev;
set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
ret = platform_device_add_data(pdev, &data, sizeof(data));
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a3e694679635..c45e8e3b82d3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
@@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
struct device *dev = &adapter->vdev->dev;
+ int rc;
do {
if (adapter->renegotiate) {
@@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev)
dev_err(dev, "Capabilities query timeout\n");
return -1;
}
+ rc = init_sub_crqs(adapter);
+ if (rc) {
+ dev_err(dev,
+ "Initialization of SCRQ's failed\n");
+ return -1;
+ }
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev,
+ "Initialization of SCRQ's irqs failed\n");
+ return -1;
+ }
}
reinit_completion(&adapter->init_done);
@@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs(adapter);
*req_value = be64_to_cpu(crq->request_capability_rsp.number);
ibmvnic_send_req_caps(adapter, 1);
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b936febc315a..2194960d5855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi)
goto err;
+ u64_stats_init(&tx_ring->syncp);
+
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
/* add u32 for head writeback, align after this takes care of
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 084c53582793..032f8ac06357 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
if (!tx_ring->tx_buffer_info)
goto err;
+ u64_stats_init(&tx_ring->syncp);
+
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
goto err;
+ u64_stats_init(&rx_ring->syncp);
+
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 48d21c1e09f2..4d598ca8503a 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -6504,7 +6504,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct resource *res;
const char *dt_mac_addr;
const char *mac_from;
- char hw_mac_addr[ETH_ALEN];
+ char hw_mac_addr[ETH_ALEN] = {0};
u32 id;
int features;
int phy_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c751a1d434ad..3d4e4a5d00d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct mlx4_caps *caps = &priv->mdev->dev->caps;
int err = 0;
u64 config = 0;
u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
MLX4_DEV_CAP_FLAG_WOL_PORT2;
- if (!(priv->mdev->dev->caps.flags & mask)) {
+ if (!(caps->flags & mask)) {
wol->supported = 0;
wol->wolopts = 0;
return;
}
+ if (caps->wol_port[priv->port])
+ wol->supported = WAKE_MAGIC;
+ else
+ wol->supported = 0;
+
err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
if (err) {
en_err(priv, "Failed to get WoL information\n");
return;
}
- if (config & MLX4_EN_WOL_MAGIC)
- wol->supported = WAKE_MAGIC;
- else
- wol->supported = 0;
-
- if (config & MLX4_EN_WOL_ENABLED)
+ if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
wol->wolopts = WAKE_MAGIC;
else
wol->wolopts = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 436f7689a032..bf1638044a7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
* header, the HW adds it. To address that, we are subtracting the pseudo
* header checksum from the checksum value provided by the HW.
*/
-static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
- struct iphdr *iph)
+static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+ struct iphdr *iph)
{
__u16 length_for_csum = 0;
__wsum csum_pseudo_header = 0;
+ __u8 ipproto = iph->protocol;
+
+ if (unlikely(ipproto == IPPROTO_SCTP))
+ return -1;
length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
- length_for_csum, iph->protocol, 0);
+ length_for_csum, ipproto, 0);
skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+ return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
struct ipv6hdr *ipv6h)
{
+ __u8 nexthdr = ipv6h->nexthdr;
__wsum csum_pseudo_hdr = 0;
- if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
- ipv6h->nexthdr == IPPROTO_HOPOPTS))
+ if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
+ nexthdr == IPPROTO_HOPOPTS ||
+ nexthdr == IPPROTO_SCTP))
return -1;
- hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+ hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
- csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+ csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
+ (__force __wsum)htons(nexthdr));
skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
}
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
- get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+ return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
- else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
- if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
- return -1;
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+ return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
return 0;
}
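The en_rx.c hunk turns get_fixed_ipv4_csum() into a function that can fail (SCTP is bounced back with -1 so the caller can let the stack checksum the packet in software) while keeping the same pseudo-header correction. The correction itself can be sketched in isolation — the helper name below is made up, but csum_tcpudp_nofold() and csum_sub() are the real checksum primitives:

	#include <linux/ip.h>
	#include <net/checksum.h>

	/* Remove the IPv4 pseudo-header from a hardware checksum that included it. */
	static __wsum example_strip_ipv4_pseudo_hdr(__wsum hw_checksum,
						    const struct iphdr *iph)
	{
		u16 payload_len = be16_to_cpu(iph->tot_len) - (iph->ihl << 2);
		__wsum pseudo = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						   payload_len, iph->protocol, 0);

		return csum_sub(hw_checksum, pseudo);
	}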
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 37e84a59e751..041c0ed65929 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[32] = "Loopback source checks support",
[33] = "RoCEv2 support",
[34] = "DMFS Sniffer support (UC & MC)",
- [35] = "QinQ VST mode support",
- [36] = "sl to vl mapping table change event support"
+ [35] = "Diag counters per port",
+ [36] = "QinQ VST mode support",
+ [37] = "sl to vl mapping table change event support",
};
int i;
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
+#define QUERY_DEV_CAP_WOL_OFFSET 0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
dev_cap->flags = flags | (u64)ext_flags << 32;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
+ dev_cap->wol_port[1] = !!(field & 0x20);
+ dev_cap->wol_port[2] = !!(field & 0x40);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 5343a0599253..b52ba01aa486 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
u32 dmfs_high_rate_qpn_range;
struct mlx4_rate_limit_caps rl_caps;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+ bool wol_port[MLX4_MAX_PORTS + 1];
};
struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a27c9c13a36e..5fe5cdc51357 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -424,13 +424,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ dev->caps.wol_port[1] = dev_cap->wol_port[1];
+ dev->caps.wol_port[2] = dev_cap->wol_port[2];
/* Save uar page shift */
if (!mlx4_is_slave(dev)) {
/* Virtual PCI function needs to determine UAR page size from
* firmware. Only master PCI function can set the uar page size
*/
- if (enable_4k_uar)
+ if (enable_4k_uar || !dev->persist->num_vfs)
dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
else
dev->uar_page_shift = PAGE_SHIFT;
@@ -2275,7 +2277,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
- if (enable_4k_uar) {
+ if (enable_4k_uar || !dev->persist->num_vfs) {
init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0039b4725405..2f26fb34d741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -263,6 +263,7 @@ struct mlx5e_dcbx {
/* The only setting that cannot be read from FW */
u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
+ u8 cap;
};
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 2eb54d36e16e..c1d384fca4dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_dcbx *dcbx = &priv->dcbx;
- u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
-
- if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
- mode |= DCB_CAP_DCBX_HOST;
- return mode;
+ return priv->dcbx.cap;
}
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
/* set dcbx to fw controlled */
if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
+ dcbx->cap &= ~DCB_CAP_DCBX_HOST;
return 0;
}
@@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;
+ dcbx->cap = mode;
+
return 0;
}
@@ -628,9 +626,9 @@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
*cap = false;
break;
case DCB_CAP_ATTR_DCBX:
- *cap = (DCB_CAP_DCBX_LLD_MANAGED |
- DCB_CAP_DCBX_VER_CEE |
- DCB_CAP_DCBX_STATIC);
+ *cap = priv->dcbx.cap |
+ DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE;
break;
default:
*cap = 0;
@@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ if (!MLX5_CAP_GEN(priv->mdev, qos))
+ return;
+
if (MLX5_CAP_GEN(priv->mdev, dcbx))
mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
+ priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE;
+ if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+ priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+
mlx5e_ets_init(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 917fade5f5d5..f5594014715b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -641,8 +641,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;
- mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_build_default_indir_rqt(priv->mdev,
+ new_channels.params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 57f31fa478ce..6ad7f07e7861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1969,6 +1969,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
+ param->cq_period_mode = params->rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 325b2c8c1c6d..7344433259fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -222,13 +222,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
if (unlikely(!page))
return -ENOMEM;
- dma_info->page = page;
dma_info->addr = dma_map_page(rq->pdev, page, 0,
RQ_PAGE_SIZE(rq), rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
put_page(page);
return -ENOMEM;
}
+ dma_info->page = page;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3c536f560dd2..7f282e8f4e7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1443,12 +1443,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
- dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
- ret = dst->error;
- if (ret) {
- dst_release(dst);
+ ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+ fl6);
+ if (ret < 0)
return ret;
- }
*out_ttl = ip6_dst_hoplimit(dst);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index aaa0f4ebba9a..31353e5c3c78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -128,10 +128,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
return mlx5e_skb_l2_header_offset(skb);
}
-static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
- struct sk_buff *skb)
+static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+ struct sk_buff *skb)
{
- int hlen;
+ u16 hlen;
switch (mode) {
case MLX5_INLINE_MODE_NONE:
@@ -140,19 +140,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
hlen = eth_get_headlen(skb->data, skb_headlen(skb));
if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
hlen += VLAN_HLEN;
- return hlen;
+ break;
case MLX5_INLINE_MODE_IP:
/* When transport header is set to zero, it means no transport
* header. When transport header is set to 0xff's, it means
* transport header wasn't set.
*/
- if (skb_transport_offset(skb))
- return mlx5e_skb_l3_header_offset(skb);
+ if (skb_transport_offset(skb)) {
+ hlen = mlx5e_skb_l3_header_offset(skb);
+ break;
+ }
/* fall through */
case MLX5_INLINE_MODE_L2:
default:
- return mlx5e_skb_l2_header_offset(skb);
+ hlen = mlx5e_skb_l2_header_offset(skb);
}
+ return min_t(u16, hlen, skb->len);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 95b64025ce36..5bc0593bd76e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -815,7 +815,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
struct mlx5_eswitch_rep *rep;
int vport;
- for (vport = 0; vport < nvports; vport++) {
+ for (vport = nvports - 1; vport >= 0; vport--) {
rep = &esw->offloads.vport_reps[vport];
if (!rep->valid)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c065132b956d..16885827367b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1186,7 +1186,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
}
- clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
mutex_unlock(&dev->intf_state_mutex);
@@ -1261,7 +1260,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_drain_health_recovery(dev);
mutex_lock(&dev->intf_state_mutex);
- if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
__func__);
if (cleanup)
@@ -1270,7 +1269,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
@@ -1565,8 +1563,6 @@ static void shutdown(struct pci_dev *pdev)
int err;
dev_info(&pdev->dev, "Shutdown was called\n");
- /* Notify mlx5 clients that the kernel is being shut down */
- set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev, priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f774de6f5fcb..520f6382dfde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- /* arm_srq structs missing using identical xrc ones */
- u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+ u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
+ u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
- MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
- MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
+ MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+ MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 60bf8f27cc00..c6a3e61b53bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4139,6 +4139,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
return -EINVAL;
if (!info->linking)
break;
+ if (netdev_has_any_upper_dev(upper_dev))
+ return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
@@ -4258,6 +4260,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev))
return -EINVAL;
+ if (!info->linking)
+ break;
+ if (netdev_has_any_upper_dev(upper_dev))
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 656b2d3f1bee..d39ffbfcc436 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_UC,
@@ -705,21 +705,28 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_port_mc_router)
{
struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
if (switchdev_trans_ph_prepare(trans))
return 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
if (!bridge_port->bridge_device->multicast_enabled)
- return 0;
+ goto out;
- return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
- MLXSW_SP_FLOOD_TYPE_MC,
- is_port_mc_router);
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ is_port_mc_router);
+ if (err)
+ return err;
+
+out:
+ bridge_port->mrouter = is_port_mc_router;
+ return 0;
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1283,15 +1290,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
mdb->vid);
- if (WARN_ON(!mlxsw_sp_port_vlan))
- return -EINVAL;
+ if (!mlxsw_sp_port_vlan)
+ return 0;
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
@@ -1407,15 +1414,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
mdb->vid);
- if (WARN_ON(!mlxsw_sp_port_vlan))
- return -EINVAL;
+ if (!mlxsw_sp_port_vlan)
+ return 0;
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
@@ -1974,6 +1981,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
}
+static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_mid *mid, *tmp;
+
+ list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
+ list_del(&mid->list);
+ clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+ kfree(mid);
+ }
+}
+
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2014,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
- WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+ mlxsw_sp_mids_fini(mlxsw_sp);
WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
kfree(mlxsw_sp->bridge);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index dd7fa9cf225f..b0837b58c3a1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -115,14 +115,10 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
return;
}
- if (link) {
+ if (link)
netif_carrier_on(netdev);
- rtnl_lock();
- dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
- rtnl_unlock();
- } else {
+ else
netif_carrier_off(netdev);
- }
rcu_read_unlock();
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 0e08404480ef..d25b5038c3a2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -42,33 +42,29 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
struct tc_cls_flower_offload *flow, u8 key_type,
bool mask_version)
{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_vlan *flow_vlan;
u16 tmp_tci;
+ memset(frame, 0, sizeof(struct nfp_flower_meta_two));
/* Populate the metadata frame. */
frame->nfp_flow_key_layer = key_type;
frame->mask_id = ~0;
- if (mask_version) {
- frame->tci = cpu_to_be16(~0);
- return;
- }
-
- flow_vlan = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- flow->key);
-
- /* Populate the tci field. */
- if (!flow_vlan->vlan_id) {
- tmp_tci = 0;
- } else {
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- flow_vlan->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- flow_vlan->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ flow_vlan = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ target);
+ /* Populate the tci field. */
+ if (flow_vlan->vlan_id) {
+ tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ flow_vlan->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ flow_vlan->vlan_id) |
+ NFP_FLOWER_MASK_VLAN_CFI;
+ frame->tci = cpu_to_be16(tmp_tci);
+ }
}
- frame->tci = cpu_to_be16(tmp_tci);
}
static void
@@ -99,17 +95,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_eth_addrs *flow_mac;
-
- flow_mac = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- target);
+ struct flow_dissector_key_eth_addrs *addr;
memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
- /* Populate mac frame. */
- ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]);
- ether_addr_copy(frame->mac_src, &flow_mac->src[0]);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ target);
+ /* Populate mac frame. */
+ ether_addr_copy(frame->mac_dst, &addr->dst[0]);
+ ether_addr_copy(frame->mac_src, &addr->src[0]);
+ }
if (mask_version)
frame->mpls_lse = cpu_to_be32(~0);
@@ -121,14 +118,17 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ports *flow_tp;
+ struct flow_dissector_key_ports *tp;
- flow_tp = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- target);
+ memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
- frame->port_src = flow_tp->src;
- frame->port_dst = flow_tp->dst;
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ tp = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ target);
+ frame->port_src = tp->src;
+ frame->port_dst = tp->dst;
+ }
}
static void
@@ -137,25 +137,27 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *flow_ipv4;
- struct flow_dissector_key_basic *flow_basic;
-
- flow_ipv4 = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- target);
-
- flow_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- target);
+ struct flow_dissector_key_ipv4_addrs *addr;
+ struct flow_dissector_key_basic *basic;
- /* Populate IPv4 frame. */
- frame->reserved = 0;
- frame->ipv4_src = flow_ipv4->src;
- frame->ipv4_dst = flow_ipv4->dst;
- frame->proto = flow_basic->ip_proto;
/* Wildcard TOS/TTL for now. */
- frame->tos = 0;
- frame->ttl = 0;
+ memset(frame, 0, sizeof(struct nfp_flower_ipv4));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ target);
+ frame->ipv4_src = addr->src;
+ frame->ipv4_dst = addr->dst;
+ }
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+ frame->proto = basic->ip_proto;
+ }
}
static void
@@ -164,26 +166,27 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv6_addrs *flow_ipv6;
- struct flow_dissector_key_basic *flow_basic;
-
- flow_ipv6 = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- target);
+ struct flow_dissector_key_ipv6_addrs *addr;
+ struct flow_dissector_key_basic *basic;
- flow_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- target);
-
- /* Populate IPv6 frame. */
- frame->reserved = 0;
- frame->ipv6_src = flow_ipv6->src;
- frame->ipv6_dst = flow_ipv6->dst;
- frame->proto = flow_basic->ip_proto;
/* Wildcard LABEL/TOS/TTL for now. */
- frame->ipv6_flow_label_exthdr = 0;
- frame->tos = 0;
- frame->ttl = 0;
+ memset(frame, 0, sizeof(struct nfp_flower_ipv6));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ target);
+ frame->ipv6_src = addr->src;
+ frame->ipv6_dst = addr->dst;
+ }
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+ frame->proto = basic->ip_proto;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
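The match.c changes above (and the offload.c changes that follow) wrap every skb_flow_dissector_target() lookup in a dissector_uses_key() check, because the flower classifier only fills in the keys a given filter actually uses; dereferencing an absent key reads unrelated memory. A generic sketch of the guard pattern — the function is hypothetical, while dissector_uses_key() and skb_flow_dissector_target() are the real helpers:

	#include <linux/etherdevice.h>
	#include <net/flow_dissector.h>
	#include <net/pkt_cls.h>

	/* Copy MAC addresses out of a flower rule only if the key is present. */
	static void example_copy_eth_addrs(struct tc_cls_flower_offload *flow,
					   u8 *dst, u8 *src)
	{
		struct flow_dissector_key_eth_addrs *addrs;

		if (!dissector_uses_key(flow->dissector,
					FLOW_DISSECTOR_KEY_ETH_ADDRS))
			return;	/* key absent: leave the match wildcarded */

		addrs = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  flow->key);
		ether_addr_copy(dst, addrs->dst);
		ether_addr_copy(src, addrs->src);
	}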
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 4ad10bd5e139..74a96d6bb05c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -105,43 +105,62 @@ static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow)
{
- struct flow_dissector_key_control *mask_enc_ctl;
- struct flow_dissector_key_basic *mask_basic;
- struct flow_dissector_key_basic *key_basic;
+ struct flow_dissector_key_basic *mask_basic = NULL;
+ struct flow_dissector_key_basic *key_basic = NULL;
+ struct flow_dissector_key_ip *mask_ip = NULL;
u32 key_layer_two;
u8 key_layer;
int key_size;
- mask_enc_ctl = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- flow->mask);
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_control *mask_enc_ctl =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ flow->mask);
+ /* We are expecting a tunnel. For now we ignore offloading. */
+ if (mask_enc_ctl->addr_type)
+ return -EOPNOTSUPP;
+ }
- mask_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->mask);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ mask_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->mask);
+
+ key_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ }
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
+ mask_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ flow->mask);
- key_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->key);
key_layer_two = 0;
key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
key_size = sizeof(struct nfp_flower_meta_one) +
sizeof(struct nfp_flower_in_port) +
sizeof(struct nfp_flower_mac_mpls);
- /* We are expecting a tunnel. For now we ignore offloading. */
- if (mask_enc_ctl->addr_type)
- return -EOPNOTSUPP;
-
- if (mask_basic->n_proto) {
+ if (mask_basic && mask_basic->n_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
+ if (mask_ip && mask_ip->tos)
+ return -EOPNOTSUPP;
+ if (mask_ip && mask_ip->ttl)
+ return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;
case cpu_to_be16(ETH_P_IPV6):
+ if (mask_ip && mask_ip->tos)
+ return -EOPNOTSUPP;
+ if (mask_ip && mask_ip->ttl)
+ return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -152,6 +171,11 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
+ /* Currently we do not offload MPLS. */
+ case cpu_to_be16(ETH_P_MPLS_UC):
+ case cpu_to_be16(ETH_P_MPLS_MC):
+ return -EOPNOTSUPP;
+
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@@ -166,7 +190,7 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
}
}
- if (mask_basic->ip_proto) {
+ if (mask_basic && mask_basic->ip_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->ip_proto) {
case IPPROTO_TCP:
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index d67969d3e484..3f199db2002e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -98,21 +98,20 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
struct nfp_pf *pf = pci_get_drvdata(pdev);
int err;
- mutex_lock(&pf->lock);
-
if (num_vfs > pf->limit_vfs) {
nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
pf->limit_vfs);
- err = -EINVAL;
- goto err_unlock;
+ return -EINVAL;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
- goto err_unlock;
+ return err;
}
+ mutex_lock(&pf->lock);
+
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
dev_warn(&pdev->dev,
@@ -129,9 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
return num_vfs;
err_sriov_disable:
- pci_disable_sriov(pdev);
-err_unlock:
mutex_unlock(&pf->lock);
+ pci_disable_sriov(pdev);
return err;
#endif
return 0;
@@ -158,10 +156,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
pf->num_vfs = 0;
+ mutex_unlock(&pf->lock);
+
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
-
- mutex_unlock(&pf->lock);
#endif
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 18750ff0ede6..66a09e490cf5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
tx_ring->idx = idx;
tx_ring->r_vec = r_vec;
tx_ring->is_xdp = is_xdp;
+ u64_stats_init(&tx_ring->r_vec->tx_sync);
tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
rx_ring->idx = idx;
rx_ring->r_vec = r_vec;
+ u64_stats_init(&rx_ring->r_vec->rx_sync);
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
@@ -893,6 +895,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
netdev_tx_sent_queue(nd_q, txbuf->real_len);
+ skb_tx_timestamp(skb);
+
tx_ring->wr_p += nr_frags + 1;
if (nfp_net_tx_ring_should_stop(tx_ring))
nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -901,13 +905,10 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
if (!skb->xmit_more || netif_xmit_stopped(nd_q))
nfp_net_tx_xmit_more_flush(tx_ring);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
err_unmap:
- --f;
- while (f >= 0) {
+ while (--f >= 0) {
frag = &skb_shinfo(skb)->frags[f];
dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
@@ -1750,6 +1751,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
if (likely(!meta.portid)) {
netdev = dp->netdev;
} else {
@@ -1758,16 +1763,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nn = netdev_priv(dp->netdev);
netdev = nfp_app_repr_get(nn->app, meta.portid);
if (unlikely(!netdev)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
continue;
}
nfp_repr_inc_rx_stats(netdev, pkt_len);
}
- nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
- nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 5797dbf2b507..34b985384d26 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -456,13 +456,9 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
int err;
- err = nfp_net_pf_app_start_ctrl(pf);
- if (err)
- return err;
-
err = nfp_app_start(pf->app, pf->ctrl_vnic);
if (err)
- goto err_ctrl_stop;
+ return err;
if (pf->num_vfs) {
err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
@@ -474,8 +470,6 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
err_app_stop:
nfp_app_stop(pf->app);
-err_ctrl_stop:
- nfp_net_pf_app_stop_ctrl(pf);
return err;
}
@@ -484,7 +478,6 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf)
if (pf->num_vfs)
nfp_app_sriov_disable(pf->app);
nfp_app_stop(pf->app);
- nfp_net_pf_app_stop_ctrl(pf);
}
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
@@ -559,7 +552,7 @@ err_unmap_ctrl:
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
- nfp_net_pf_app_stop(pf);
+ nfp_net_pf_app_stop_ctrl(pf);
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
@@ -690,6 +683,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
{
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
+ struct nfp_net *nn;
int stride;
int err;
@@ -766,7 +760,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_free_vnics;
- err = nfp_net_pf_app_start(pf);
+ err = nfp_net_pf_app_start_ctrl(pf);
if (err)
goto err_free_irqs;
@@ -774,12 +768,20 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_stop_app;
+ err = nfp_net_pf_app_start(pf);
+ if (err)
+ goto err_clean_vnics;
+
mutex_unlock(&pf->lock);
return 0;
+err_clean_vnics:
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
+ if (nfp_net_is_data_vnic(nn))
+ nfp_net_pf_clean_vnic(pf, nn);
err_stop_app:
- nfp_net_pf_app_stop(pf);
+ nfp_net_pf_app_stop_ctrl(pf);
err_free_irqs:
nfp_net_pf_free_irqs(pf);
err_free_vnics:
@@ -803,6 +805,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
if (list_empty(&pf->vnics))
goto out;
+ nfp_net_pf_app_stop(pf);
+
list_for_each_entry(nn, &pf->vnics, vnic_list)
if (nfp_net_is_data_vnic(nn))
nfp_net_pf_clean_vnic(pf, nn);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 66ff15d08bad..0a66389c06c2 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -2311,7 +2311,7 @@ netxen_md_rdqueue(struct netxen_adapter *adapter,
loop_cnt++) {
NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id);
read_addr = queueEntry->read_addr;
- for (k = 0; k < read_cnt; k--) {
+ for (k = 0; k < read_cnt; k++) {
NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0,
&read_value);
*data_buff++ = read_value;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9da91045d167..3eb241657368 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
- if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+ if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
goto err;
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 28ea0af89aef..e3223f2fe2ff 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
/*
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index bd07a15d3b7c..e03fcf914690 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6863,8 +6863,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (skb) {
- tp->dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
tx_skb->skb = NULL;
}
}
@@ -7319,7 +7318,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
tp->tx_stats.packets++;
tp->tx_stats.bytes += tx_skb->skb->len;
u64_stats_update_end(&tp->tx_stats.syncp);
- dev_kfree_skb_any(tx_skb->skb);
+ dev_consume_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 73427e29df2a..fbd00cb0cb7d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -47,6 +47,8 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(*plat->mdio_bus_data),
GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
if (!dma_cfg)
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index c905971c5f3a..990a63d7fcb7 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -938,7 +938,6 @@ enum efx_stats_action {
static int efx_mcdi_mac_stats(struct efx_nic *efx,
enum efx_stats_action action, int clear)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -960,7 +959,12 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+ }
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 17d4bbaeb65c..6e359572b9f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
- if (dwmac->f2h_ptp_ref_clk) {
+ if (dwmac->f2h_ptp_ref_clk ||
+ phymode == PHY_INTERFACE_MODE_MII ||
+ phymode == PHY_INTERFACE_MODE_GMII ||
+ phymode == PHY_INTERFACE_MODE_SGMII) {
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
&module);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index fffd6d5fc907..39c2122a4f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -979,14 +979,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
}
static const struct of_device_id sun8i_dwmac_match[] = {
- { .compatible = "allwinner,sun8i-h3-emac",
- .data = &emac_variant_h3 },
- { .compatible = "allwinner,sun8i-v3s-emac",
- .data = &emac_variant_v3s },
- { .compatible = "allwinner,sun8i-a83t-emac",
- .data = &emac_variant_a83t },
- { .compatible = "allwinner,sun50i-a64-emac",
- .data = &emac_variant_a64 },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index db157a47000c..72ec711fcba2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -204,6 +204,7 @@ int stmmac_mdio_register(struct net_device *ndev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
+ struct device *dev = ndev->dev.parent;
int addr, found;
if (!mdio_bus_data)
@@ -237,7 +238,7 @@ int stmmac_mdio_register(struct net_device *ndev)
else
err = mdiobus_register(new_bus);
if (err != 0) {
- netdev_err(ndev, "Cannot register the MDIO bus\n");
+ dev_err(dev, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}
@@ -285,14 +286,12 @@ int stmmac_mdio_register(struct net_device *ndev)
irq_str = irq_num;
break;
}
- netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
- phydev->phy_id, addr, irq_str, phydev_name(phydev),
- act ? " active" : "");
+ phy_attached_info(phydev);
found = 1;
}
if (!found && !mdio_node) {
- netdev_warn(ndev, "No PHY found\n");
+ dev_warn(dev, "No PHY found\n");
mdiobus_unregister(new_bus);
mdiobus_free(new_bus);
return -ENODEV;
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 56ba411421f0..38d1cc557c11 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -96,7 +96,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
if (of_machine_is_compatible("ti,dra7"))
return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
- dev_err(dev, "incompatible machine/device type for reading mac address\n");
+ dev_info(dev, "incompatible machine/device type for reading mac address\n");
return -ENOENT;
}
EXPORT_SYMBOL_GPL(ti_cm_get_macid);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 32279d21c836..c2121d214f08 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,9 +31,18 @@
#include "cpts.h"
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+
+struct cpts_skb_cb_data {
+ unsigned long tmo;
+};
+
#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+ u16 ts_seqid, u8 ts_msgtype);
+
static int event_expired(struct cpts_event *event)
{
return time_after(jiffies, event->tmo);
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
return removed ? 0 : -1;
}
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+ struct sk_buff *skb, *tmp;
+ u16 seqid;
+ u8 mtype;
+ bool found = false;
+
+ mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+ seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+
+ /* no need to grab txq.lock as access is always done under cpts->lock */
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ unsigned int class = ptp_classify_raw(skb);
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+
+ if (cpts_match(skb, class, seqid, mtype)) {
+ u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
+ mtype, seqid);
+ } else if (time_after(jiffies, skb_cb->tmo)) {
+ /* timeout any expired skbs over 1s */
+ dev_dbg(cpts->dev,
+ "expiring tx timestamp mtype %u seqid %04x\n",
+ mtype, seqid);
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ return found;
+}
+
/*
* Returns zero if matching event type was found.
*/
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
event->low = lo;
type = event_type(event);
switch (type) {
+ case CPTS_EV_TX:
+ if (cpts_match_tx_ts(cpts, event)) {
+ /* if the new event matches an existing skb,
+ * then don't queue it
+ */
+ break;
+ }
case CPTS_EV_PUSH:
case CPTS_EV_RX:
- case CPTS_EV_TX:
list_del_init(&event->list);
list_add_tail(&event->list, &cpts->events);
break;
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
+{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+ unsigned long delay = cpts->ov_check_period;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+
+ if (!skb_queue_empty(&cpts->txq))
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ return (long)delay;
+}
+
static struct ptp_clock_info cpts_info = {
.owner = THIS_MODULE,
.name = "CTPS timer",
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
.gettime64 = cpts_ptp_gettime,
.settime64 = cpts_ptp_settime,
.enable = cpts_ptp_enable,
+ .do_aux_work = cpts_overflow_check,
};
-static void cpts_overflow_check(struct work_struct *work)
-{
- struct timespec64 ts;
- struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
-
- cpts_ptp_gettime(&cpts->info, &ts);
- pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
- schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
-}
-
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
u16 ts_seqid, u8 ts_msgtype)
{
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
return 0;
spin_lock_irqsave(&cpts->lock, flags);
- cpts_fifo_read(cpts, CPTS_EV_PUSH);
+ cpts_fifo_read(cpts, -1);
list_for_each_safe(this, next, &cpts->events) {
event = list_entry(this, struct cpts_event, list);
if (event_expired(event)) {
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
break;
}
}
+
+ if (ev_type == CPTS_EV_TX && !ns) {
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+ /* Not found, add frame to queue for processing later.
+ * The periodic FIFO check will handle this.
+ */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+ __skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->clock, 0);
+ }
spin_unlock_irqrestore(&cpts->lock, flags);
return ns;
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
{
int err, i;
+ skb_queue_head_init(&cpts->txq);
INIT_LIST_HEAD(&cpts->events);
INIT_LIST_HEAD(&cpts->pool);
for (i = 0; i < CPTS_MAX_EVENTS; i++)
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
}
cpts->phc_index = ptp_clock_index(cpts->clock);
- schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
+ ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
return 0;
err_ptp:
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
if (WARN_ON(!cpts->clock))
return;
- cancel_delayed_work_sync(&cpts->overflow_work);
-
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
+ /* Drop all packets */
+ skb_queue_purge(&cpts->txq);
+
clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->dev = dev;
cpts->reg = (struct cpsw_cpts __iomem *)regs;
spin_lock_init(&cpts->lock);
- INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
ret = cpts_of_parse(cpts, node);
if (ret)
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 01ea82ba9cdc..73d73faf0f38 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -119,13 +119,13 @@ struct cpts {
u32 cc_mult; /* for the nominal frequency */
struct cyclecounter cc;
struct timecounter tc;
- struct delayed_work overflow_work;
int phc_index;
struct clk *refclk;
struct list_head events;
struct list_head pool;
struct cpts_event pool_data[CPTS_MAX_EVENTS];
unsigned long ov_check_period;
+ struct sk_buff_head txq;
};
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
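Together, the cpts.c and cpts.h hunks drop the driver-private delayed_work and let the PTP core drive the periodic overflow/TX-timestamp check through the .do_aux_work callback, rescheduled with ptp_schedule_worker(). A hedged outline of that shape — the structure and callback names are placeholders, while ptp_clock_info.do_aux_work, ptp_clock_register() and ptp_schedule_worker() are the real interfaces:

	#include <linux/err.h>
	#include <linux/jiffies.h>
	#include <linux/module.h>
	#include <linux/ptp_clock_kernel.h>

	struct example_clock {
		struct ptp_clock_info info;
		struct ptp_clock *ptp;
	};

	/* Runs from the PTP core's kthread.  The return value is the delay in
	 * jiffies until the next run; a negative value stops rescheduling.
	 */
	static long example_aux_work(struct ptp_clock_info *info)
	{
		/* struct example_clock *clk =
		 *	container_of(info, struct example_clock, info); */
		/* ... read the hardware FIFO, deliver deferred timestamps ... */
		return HZ;	/* run again in roughly one second */
	}

	static int example_clock_register(struct example_clock *clk,
					  struct device *dev)
	{
		clk->info.owner = THIS_MODULE;
		clk->info.do_aux_work = example_aux_work;

		clk->ptp = ptp_clock_register(&clk->info, dev);
		if (IS_ERR(clk->ptp))
			return PTR_ERR(clk->ptp);

		ptp_schedule_worker(clk->ptp, 0);	/* first run immediately */
		return 0;
	}

The benefit visible in the diff is that the same worker can also be kicked early, as cpts_find_ts() does with ptp_schedule_worker(cpts->clock, 0) when a TX timestamp has to be matched later.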
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index de8156c6b292..2bbda71818ad 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_GENEVE_ID]) {
__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
- if (vni >= GENEVE_VID_MASK)
+ if (vni >= GENEVE_N_VID)
return -ERANGE;
}
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 1542e837fdfa..f38e32a7ec9c 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
gtp->dev = dev;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
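The gtp.c hunk here (and the similar ipvlan change further down) swaps a bare alloc_percpu() for netdev_alloc_pcpu_stats(), whose point is that it also runs u64_stats_init() on every CPU's syncp. A minimal sketch of the idiom — the ndo hook names are illustrative, the macro and struct pcpu_sw_netstats are real:

	#include <linux/netdevice.h>
	#include <linux/percpu.h>

	static int example_ndo_init(struct net_device *dev)
	{
		/* Allocates the per-cpu area and initializes each CPU's syncp. */
		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
		if (!dev->tstats)
			return -ENOMEM;
		return 0;
	}

	static void example_ndo_uninit(struct net_device *dev)
	{
		free_percpu(dev->tstats);
	}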
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d6c25580f8dd..12cc64bfcff8 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -765,7 +765,8 @@ struct netvsc_device {
u32 max_chn;
u32 num_chn;
- refcount_t sc_offered;
+ atomic_t open_chn;
+ wait_queue_head_t subchan_open;
struct rndis_device *extension;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0a9167dd72fb..d18c3326a1f7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void)
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
init_completion(&net_device->channel_init_wait);
+ init_waitqueue_head(&net_device->subchan_open);
return net_device;
}
@@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device,
struct netvsc_channel *nvchan = &net_device->chan_table[i];
nvchan->channel = device->channel;
+ u64_stats_init(&nvchan->tx_stats.syncp);
+ u64_stats_init(&nvchan->rx_stats.syncp);
}
/* Enable NAPI handler before init callbacks */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 0d78727f1a14..d91cbc6c3ca4 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1269,7 +1269,12 @@ static void netvsc_link_change(struct work_struct *w)
bool notify = false, reschedule = false;
unsigned long flags, next_reconfig, delay;
- rtnl_lock();
+ /* if changes are happening, come back later */
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+ return;
+ }
+
net_device = rtnl_dereference(ndev_ctx->nvdev);
if (!net_device)
goto out_unlock;
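The netvsc_drv.c hunk above replaces an unconditional rtnl_lock() in the link-change work with rtnl_trylock() plus a delayed retry, so the work item never blocks behind a long-held RTNL. A generic, hedged sketch of that pattern — the work item and retry delay are placeholders, rtnl_trylock()/rtnl_unlock() and schedule_delayed_work() are real:

	#include <linux/jiffies.h>
	#include <linux/rtnetlink.h>
	#include <linux/workqueue.h>

	#define EXAMPLE_RETRY_DELAY	(HZ / 10)	/* arbitrary back-off */

	/* Set up elsewhere with
	 * INIT_DELAYED_WORK(&example_link_work, example_link_change).
	 */
	static struct delayed_work example_link_work;

	static void example_link_change(struct work_struct *w)
	{
		/* Someone else holds RTNL: back off instead of sleeping on it. */
		if (!rtnl_trylock()) {
			schedule_delayed_work(&example_link_work,
					      EXAMPLE_RETRY_DELAY);
			return;
		}

		/* ... update carrier state, notify upper layers ... */

		rtnl_unlock();
	}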
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 85c00e1c52b6..d6308ffda53e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
else
netif_napi_del(&nvchan->napi);
- if (refcount_dec_and_test(&nvscdev->sc_offered))
- complete(&nvscdev->channel_init_wait);
+ atomic_inc(&nvscdev->open_chn);
+ wake_up(&nvscdev->subchan_open);
}
int rndis_filter_device_add(struct hv_device *dev,
@@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->max_chn = 1;
net_device->num_chn = 1;
- refcount_set(&net_device->sc_offered, 0);
-
net_device->extension = rndis_device;
rndis_device->ndev = net;
@@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev,
rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
net_device->num_chn);
+ atomic_set(&net_device->open_chn, 1);
num_rss_qs = net_device->num_chn - 1;
if (num_rss_qs == 0)
return 0;
- refcount_set(&net_device->sc_offered, num_rss_qs);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
init_packet = &net_device->channel_init_pkt;
@@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret)
goto out;
+ wait_for_completion(&net_device->channel_init_wait);
if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
ret = -ENODEV;
goto out;
}
- wait_for_completion(&net_device->channel_init_wait);
net_device->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
+ /* wait for all sub channels to open */
+ wait_event(net_device->subchan_open,
+ atomic_read(&net_device->open_chn) == net_device->num_chn);
+
/* ignore failures from setting rss parameters, still have channels */
rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
net_device->num_chn);
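The hyperv hunks replace the refcount/completion handshake for subchannel bring-up with an atomic open-channel counter and a wait queue, so rndis_filter_device_add() only proceeds once every subchannel-open callback has actually run. A hedged distillation of that synchronization — the structure and callers are illustrative, while atomic_t, init_waitqueue_head(), wake_up() and wait_event() are the real primitives:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	struct example_dev {
		atomic_t open_chn;		/* channels opened so far */
		wait_queue_head_t subchan_open;	/* init_waitqueue_head() at alloc */
		int num_chn;			/* total channels expected */
	};

	/* Host callback: one invocation per subchannel that finished opening. */
	static void example_subchannel_opened(struct example_dev *dev)
	{
		atomic_inc(&dev->open_chn);
		wake_up(&dev->subchan_open);
	}

	/* Device-add path, after deciding how many subchannels to request. */
	static void example_request_and_wait(struct example_dev *dev)
	{
		atomic_set(&dev->open_chn, 1);	/* the primary channel is open */

		/* ... send the subchannel allocation request to the host ... */

		wait_event(dev->subchan_open,
			   atomic_read(&dev->open_chn) == dev->num_chn);
	}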
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f37e3c1fd4e7..8dab74a81303 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
netdev_lockdep_set_classes(dev);
- ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
+ ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
return -ENOMEM;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 5e1ab1160856..98e4deaa3a6a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3521,6 +3521,7 @@ module_init(macsec_init);
module_exit(macsec_exit);
MODULE_ALIAS_RTNL_LINK("macsec");
+MODULE_ALIAS_GENL_FAMILY("macsec");
MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5068c582d502..d0626bf5c540 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -749,9 +749,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- /* Now we can run the state machine synchronously */
- phy_state_machine(&phydev->state_queue.work);
}
/**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1790f7fec125..2f742ae5b92e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -864,15 +864,17 @@ EXPORT_SYMBOL(phy_attached_info);
#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)"
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
{
+ const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+
if (!fmt) {
dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n",
- phydev->drv->name, phydev_name(phydev),
+ drv_name, phydev_name(phydev),
phydev->irq);
} else {
va_list ap;
dev_info(&phydev->mdio.dev, ATTACHED_FMT,
- phydev->drv->name, phydev_name(phydev),
+ drv_name, phydev_name(phydev),
phydev->irq);
va_start(ap, fmt);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index bd4303944e44..a404552555d4 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) {
- read_lock(&pch->upl);
ppp = pch->ppp;
if (ppp)
- ppp_xmit_process(ppp);
- read_unlock(&pch->upl);
+ __ppp_xmit_process(ppp);
}
}
static void ppp_channel_push(struct channel *pch)
{
- local_bh_disable();
-
- __ppp_channel_push(pch);
-
- local_bh_enable();
+ read_lock_bh(&pch->upl);
+ if (pch->ppp) {
+ (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
+ __ppp_channel_push(pch);
+ (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+ } else {
+ __ppp_channel_push(pch);
+ }
+ read_unlock_bh(&pch->upl);
}
/*
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 32ad87345f57..0a2c0a42283f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1879,6 +1879,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err_detach:
tun_detach_all(dev);
+ /* register_netdevice() already called tun_free_netdev() */
+ goto err_free_dev;
+
err_free_flow:
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index d1092421aaa7..9a4171b90947 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
struct asix_rx_fixup_info *rx);
int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
+void asix_rx_fixup_common_free(struct asix_common_private *dp);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7847436c441e..522d2900cd1d 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
+static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
+{
+ /* Reset the variables that have a lifetime outside of
+ * asix_rx_fixup_internal() so that future processing starts from a
+ * known set of initial conditions.
+ */
+
+ if (rx->ax_skb) {
+ /* Discard any incomplete Ethernet frame in the netdev buffer */
+ kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
+
+ /* Assume the Data header 32-bit word is at the start of the current
+ * or next URB socket buffer so reset all the state variables.
+ */
+ rx->remaining = 0;
+ rx->split_head = false;
+ rx->header = 0;
+}
+
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
struct asix_rx_fixup_info *rx)
{
@@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (size != ((~rx->header >> 16) & 0x7ff)) {
netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
rx->remaining);
- if (rx->ax_skb) {
- kfree_skb(rx->ax_skb);
- rx->ax_skb = NULL;
- /* Discard the incomplete netdev Ethernet frame
- * and assume the Data header is at the start of
- * the current URB socket buffer.
- */
- }
- rx->remaining = 0;
+ reset_asix_rx_fixup_info(rx);
}
}
@@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (size != ((~rx->header >> 16) & 0x7ff)) {
netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
rx->header, offset);
+ reset_asix_rx_fixup_info(rx);
return 0;
}
if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
+ reset_asix_rx_fixup_info(rx);
return 0;
}
@@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (rx->ax_skb) {
skb_put_data(rx->ax_skb, skb->data + offset,
copy_length);
- if (!rx->remaining)
+ if (!rx->remaining) {
usbnet_skb_return(dev, rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
}
offset += (copy_length + 1) & 0xfffe;
@@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (skb->len != offset) {
netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
skb->len, offset);
+ reset_asix_rx_fixup_info(rx);
return 0;
}
@@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
return asix_rx_fixup_internal(dev, skb, rx);
}
+void asix_rx_fixup_common_free(struct asix_common_private *dp)
+{
+ struct asix_rx_fixup_info *rx;
+
+ if (!dp)
+ return;
+
+ rx = &dp->rx_fixup_info;
+
+ if (rx->ax_skb) {
+ kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
+}
+
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index a3aa0a27dfe5..b2ff88e69a81 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
+ asix_rx_fixup_common_free(dev->driver_priv);
kfree(dev->driver_priv);
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8f572b9f3625..9c80e80c5493 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1758,6 +1758,13 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_noarp_info,
},
+ /* u-blox TOBY-L4 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 5833f7e2a127..b99a7fb09f8e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* Init LTM */
lan78xx_init_ltm(dev);
- dev->net->hard_header_len += TX_OVERHEAD;
- dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-
if (dev->udev->speed == USB_SPEED_SUPER) {
buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
@@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
return ret;
}
+ dev->net->hard_header_len += TX_OVERHEAD;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
/* Init all registers */
ret = lan78xx_reset(dev);
- lan78xx_mdio_init(dev);
+ ret = lan78xx_mdio_init(dev);
dev->net->flags |= IFF_MULTICAST;
pdata->wol = WAKE_MAGIC;
- return 0;
+ return ret;
}
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
udev = interface_to_usbdev(intf);
udev = usb_get_dev(udev);
- ret = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct lan78xx_net));
if (!netdev) {
- dev_err(&intf->dev, "Error: OOM\n");
- goto out1;
+ dev_err(&intf->dev, "Error: OOM\n");
+ ret = -ENOMEM;
+ goto out1;
}
/* netdev_printk() needs this */
@@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out2;
+ goto out3;
}
usb_set_intfdata(intf, dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5894e3c9468f..8c3733608271 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
static void qmi_wwan_disconnect(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct qmi_wwan_state *info = (void *)&dev->data;
+ struct qmi_wwan_state *info;
struct list_head *iter;
struct net_device *ldev;
+ /* called twice if separate control and data intf */
+ if (!dev)
+ return;
+ info = (void *)&dev->data;
if (info->flags & QMI_WWAN_FLAG_MUX) {
if (!rtnl_trylock()) {
restart_syscall();
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 98f17b05c68b..b06169ea60dc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1058,7 +1058,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
bytes += skb->len;
packets++;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
/* Avoid overhead when no packets have been processed
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 96aa7e6cf214..e17baac70f43 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
out:
skb_gro_remcsum_cleanup(skb, &grc);
+ skb->remcsum_offload = 0;
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index d21258d277ce..f1b60740e020 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -159,8 +159,10 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_firmware_capabilities(ifp);
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
- brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg",
- &gscan_cfg, sizeof(gscan_cfg));
+ if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID)
+ brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
+ "pfn_gscan_cfg",
+ &gscan_cfg, sizeof(gscan_cfg));
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index b4ecd1fe1374..97208ce19f92 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -154,7 +154,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
.fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_next_step = IWL9260B_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -165,7 +165,7 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
.fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_next_step = IWL9260B_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -176,7 +176,7 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
.fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_next_step = IWL9260B_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -186,8 +186,8 @@ const struct iwl_cfg iwl9270_2ac_cfg = {
const struct iwl_cfg iwl9460_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -198,8 +198,8 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
const struct iwl_cfg iwl9560_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9000_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 0fa8c473f1e2..c73a6438ce8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -328,6 +328,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
* command size (command version 4) that supports toggling ACK TX
* power reduction.
+ * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -373,6 +374,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
+ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index c52623cb7c2a..d19c74827fbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -276,10 +276,10 @@ struct iwl_pwr_tx_backoff {
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
- * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+ * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
* (if supported)
- * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next
- * step. Supported only in integrated solutions.
+ * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
+ * next step. Supported only in integrated solutions.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section
@@ -330,7 +330,7 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
- const char *fw_name_pre_next_step;
+ const char *fw_name_pre_b_or_c_step;
const char *fw_name_pre_rf_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 6fdb5921e17f..4e0f86fe0a6f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -216,8 +216,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
const char *fw_pre_name;
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
- fw_pre_name = cfg->fw_name_pre_next_step;
+ (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
+ CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
+ fw_pre_name = cfg->fw_name_pre_b_or_c_step;
else if (drv->trans->cfg->integrated &&
CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
cfg->fw_name_pre_rf_next_step)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 5c08f4d40f6a..3ee6767392b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -785,7 +785,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc)
{
int ch_idx;
- u16 ch_flags, prev_ch_flags = 0;
+ u16 ch_flags;
+ u32 reg_rule_flags, prev_reg_rule_flags = 0;
const u8 *nvm_chan = cfg->ext_nvm ?
iwl_ext_nvm_channels : iwl_nvm_channels;
struct ieee80211_regdomain *regd;
@@ -834,8 +835,11 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
continue;
}
+ reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+ ch_flags, cfg);
+
/* we can't continue the same rule */
- if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
center_freq - prev_center_freq > 20) {
valid_rules++;
new_rule = true;
@@ -854,18 +858,17 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
rule->power_rule.max_eirp =
DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
- rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, cfg);
+ rule->flags = reg_rule_flags;
/* rely on auto-calculation to merge BW of contiguous chans */
rule->flags |= NL80211_RRF_AUTO_BW;
rule->freq_range.max_bandwidth_khz = 0;
- prev_ch_flags = ch_flags;
prev_center_freq = center_freq;
+ prev_reg_rule_flags = reg_rule_flags;
IWL_DEBUG_DEV(dev, IWL_DL_LAR,
- "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
+ "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n",
center_freq,
band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
CHECK_AND_PRINT_I(VALID),
@@ -877,10 +880,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
CHECK_AND_PRINT_I(160MHZ),
CHECK_AND_PRINT_I(INDOOR_ONLY),
CHECK_AND_PRINT_I(GO_CONCURRENT),
- ch_flags,
+ ch_flags, reg_rule_flags,
((ch_flags & NVM_CHANNEL_ACTIVE) &&
!(ch_flags & NVM_CHANNEL_RADAR))
- ? "" : "not ");
+ ? "Ad-Hoc" : "");
}
regd->n_reg_rules = valid_rules;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 79e7a7a285dc..82863e9273eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1275,8 +1275,10 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
entry = &wifi_pkg->package.elements[idx++];
if ((entry->type != ACPI_TYPE_INTEGER) ||
- (entry->integer.value > U8_MAX))
- return -EINVAL;
+ (entry->integer.value > U8_MAX)) {
+ ret = -EINVAL;
+ goto out_free;
+ }
mvm->geo_profiles[i].values[j] = entry->integer.value;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index c7b1e58e3384..ce901be5fba8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2597,8 +2597,18 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
spin_lock_bh(&mvm_sta->lock);
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
tid_data = &mvm_sta->tid_data[i];
- while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+
+ while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ /*
+ * The first deferred frame should've stopped the MAC
+ * queues, so we should never get a second deferred
+ * frame for the RA/TID.
+ */
+ iwl_mvm_start_mac_queues(mvm, info->hw_queue);
ieee80211_free_txskb(mvm->hw, skb);
+ }
}
spin_unlock_bh(&mvm_sta->lock);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 65beca3a457a..8999a1199d60 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1291,7 +1291,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* first index into rate scale table.
*/
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+ rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len,
reduced_txp);
@@ -1312,7 +1312,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (info->status.ampdu_ack_len == 0)
info->status.ampdu_len = 1;
- rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+ rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len);
@@ -1348,11 +1348,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
continue;
rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
- lq_rate.index, 1,
+ tx_resp_rate.index, 1,
i < retries ? 0 : legacy_success,
reduced_txp);
rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
- lq_rate.index, 1,
+ tx_resp_rate.index, 1,
i < retries ? 0 : legacy_success);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index f3e608196369..71c8b800ffa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -636,9 +636,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
baid_data = rcu_dereference(mvm->baid_map[baid]);
if (!baid_data) {
- WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
- "Received baid %d, but no data exists for this BAID\n",
- baid);
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder);
return false;
}
@@ -759,7 +759,9 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
data = rcu_dereference(mvm->baid_map[baid]);
if (!data) {
- WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder_data);
goto out;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ab66b4394dfc..027ee5e72172 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -121,7 +121,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
.add_modify = update ? 1 : 0,
.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
- STA_FLG_MIMO_EN_MSK),
+ STA_FLG_MIMO_EN_MSK |
+ STA_FLG_RTS_MIMO_PROT),
.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
};
int ret;
@@ -290,8 +291,8 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data)
goto unlock;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
- sta->addr, ba_data->tid);
+ ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+ sta->addr, ba_data->tid);
unlock:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 60360ed73f26..5fcc9dd6be56 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -185,8 +185,14 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
else
udp_hdr(skb)->check = 0;
- /* mac header len should include IV, size is in words */
- if (info->control.hw_key)
+ /*
+ * mac header len should include IV, size is in words unless
+ * the IV is added by the firmware like in WEP.
+ * In new Tx API, the IV is always added by the firmware.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
mh_len += info->control.hw_key->iv_len;
mh_len /= 2;
offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
@@ -1815,6 +1821,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_mvm_tid_data *tid_data;
struct iwl_mvm_sta *mvmsta;
+ ba_info.flags = IEEE80211_TX_STAT_AMPDU;
+
if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_mvm_compressed_ba_notif *ba_res =
(void *)pkt->data;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index f16c1bb9bf94..84f4ba01e14f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -510,9 +510,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 9000 Series */
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
@@ -527,10 +535,22 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index fa315d84e98e..a1ea9ef97ed9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -787,6 +787,8 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
+void iwl_pcie_rx_allocator_work(struct work_struct *data);
+
/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 351c4423125a..942736d3fa75 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -597,7 +597,7 @@ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
struct iwl_rb_allocator *rba_p =
container_of(data, struct iwl_rb_allocator, rx_alloc);
@@ -900,10 +900,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
def_rxq = trans_pcie->rxq;
- if (!rba->alloc_wq)
- rba->alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
- INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
spin_lock(&rba->lock);
atomic_set(&rba->req_pending, 0);
@@ -1017,10 +1013,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
}
cancel_work_sync(&rba->rx_alloc);
- if (rba->alloc_wq) {
- destroy_workqueue(rba->alloc_wq);
- rba->alloc_wq = NULL;
- }
iwl_pcie_free_rbs_pool(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f95eec52508e..3927bbf04f72 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1786,6 +1786,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
+ if (trans_pcie->rba.alloc_wq) {
+ destroy_workqueue(trans_pcie->rba.alloc_wq);
+ trans_pcie->rba.alloc_wq = NULL;
+ }
+
if (trans_pcie->msix_enabled) {
for (i = 0; i < trans_pcie->alloc_vecs; i++) {
irq_set_affinity_hint(
@@ -3169,6 +3174,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->inta_mask = CSR_INI_SET_MASK;
}
+ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
#ifdef CONFIG_IWLWIFI_PCIE_RTPM
trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
#else
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 08f0477f78d9..9915d83a4a30 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
+ spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 9a03c5871efe..f58d8e305323 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -924,10 +924,8 @@ out1:
ntb_free_mw(nt, i);
/* if there's an actual failure, we should just bail */
- if (rc < 0) {
- ntb_link_disable(ndev);
+ if (rc < 0)
return;
- }
out:
if (ntb_link_is_up(ndev, NULL, NULL) == 1)
@@ -1059,7 +1057,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
int node;
int rc, i;
- mw_count = ntb_mw_count(ndev, PIDX);
+ mw_count = ntb_peer_mw_count(ndev);
if (!ndev->ops->mw_set_trans) {
dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index f002bf48a08d..a69815c45ce6 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -959,7 +959,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
tc->ntb = ntb;
init_waitqueue_head(&tc->link_wq);
- tc->mw_count = min(ntb_mw_count(tc->ntb, PIDX), MAX_MWS);
+ tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS);
for (i = 0; i < tc->mw_count; i++) {
rc = tool_init_mw(tc, i);
if (rc)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c49f1f8b2e57..37046ac2c441 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
c.directive.opcode = nvme_admin_directive_recv;
c.directive.nsid = cpu_to_le32(nsid);
- c.directive.numd = cpu_to_le32(sizeof(*s));
+ c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
c.directive.dtype = NVME_DIR_STREAMS;
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_write_cache(q, vwc, vwc);
}
-static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
/*
* APST (Autonomous Power State Transition) lets us program a
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
* then don't do anything.
*/
if (!ctrl->apsta)
- return;
+ return 0;
if (ctrl->npss > 31) {
dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
- return;
+ return 0;
}
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
- return;
+ return 0;
if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
/* Turn off APST. */
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
kfree(table);
+ return ret;
}
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
* In fabrics we need to verify the cntlid matches the
* admin connect
*/
- if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
ret = -EINVAL;
+ goto out_free;
+ }
if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
+ goto out_free;
}
} else {
ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
else if (!ctrl->apst_enabled && prev_apst_enabled)
dev_pm_qos_hide_latency_tolerance(ctrl->device);
- nvme_configure_apst(ctrl);
- nvme_configure_directives(ctrl);
+ ret = nvme_configure_apst(ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = nvme_configure_directives(ctrl);
+ if (ret < 0)
+ return ret;
ctrl->identified = true;
+ return 0;
+
+out_free:
+ kfree(id);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
return sprintf(buf, "eui.%8phN\n", ns->eui);
- while (ctrl->serial[serial_len - 1] == ' ')
+ while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
+ ctrl->serial[serial_len - 1] == '\0'))
serial_len--;
- while (ctrl->model[model_len - 1] == ' ')
+ while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
+ ctrl->model[model_len - 1] == '\0'))
model_len--;
return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 2e582a240943..5f5cd306f76d 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -794,7 +794,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
- if (opt_tokens[i].token & ~allowed_opts) {
+ if ((opt_tokens[i].token & opts->mask) &&
+ (opt_tokens[i].token & ~allowed_opts)) {
pr_warn("invalid parameter '%s'\n",
opt_tokens[i].pattern);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cd888a47d0fc..ea892e732268 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -109,6 +109,7 @@ struct nvme_dev {
/* host memory buffer support: */
u64 host_mem_size;
u32 nr_host_mem_descs;
+ dma_addr_t host_mem_descs_dma;
struct nvme_host_mem_buf_desc *host_mem_descs;
void **host_mem_desc_bufs;
};
@@ -801,6 +802,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
return;
}
+ nvmeq->cqe_seen = 1;
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
nvme_end_request(req, cqe->status, cqe->result);
}
@@ -830,10 +832,8 @@ static void nvme_process_cq(struct nvme_queue *nvmeq)
consumed++;
}
- if (consumed) {
+ if (consumed)
nvme_ring_cq_doorbell(nvmeq);
- nvmeq->cqe_seen = 1;
- }
}
static irqreturn_t nvme_irq(int irq, void *data)
@@ -1558,26 +1558,18 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
if (dev->cmb) {
iounmap(dev->cmb);
dev->cmb = NULL;
- if (dev->cmbsz) {
- sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL);
- dev->cmbsz = 0;
- }
+ sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL);
+ dev->cmbsz = 0;
}
}
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
- size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+ u64 dma_addr = dev->host_mem_descs_dma;
struct nvme_command c;
- u64 dma_addr;
int ret;
- dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, dma_addr))
- return -ENOMEM;
-
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
@@ -1594,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
"failed to set host mem (err %d, flags %#x).\n",
ret, bits);
}
- dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
return ret;
}
@@ -1612,7 +1603,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
kfree(dev->host_mem_desc_bufs);
dev->host_mem_desc_bufs = NULL;
- kfree(dev->host_mem_descs);
+ dma_free_coherent(dev->dev,
+ dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+ dev->host_mem_descs, dev->host_mem_descs_dma);
dev->host_mem_descs = NULL;
}
@@ -1620,6 +1613,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
struct nvme_host_mem_buf_desc *descs;
u32 chunk_size, max_entries, len;
+ dma_addr_t descs_dma;
int i = 0;
void **bufs;
u64 size = 0, tmp;
@@ -1630,7 +1624,8 @@ retry:
tmp = (preferred + chunk_size - 1);
do_div(tmp, chunk_size);
max_entries = tmp;
- descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+ descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+ &descs_dma, GFP_KERNEL);
if (!descs)
goto out;
@@ -1664,6 +1659,7 @@ retry:
dev->nr_host_mem_descs = i;
dev->host_mem_size = size;
dev->host_mem_descs = descs;
+ dev->host_mem_descs_dma = descs_dma;
dev->host_mem_desc_bufs = bufs;
return 0;
@@ -1677,7 +1673,8 @@ out_free_bufs:
kfree(bufs);
out_free_descs:
- kfree(descs);
+ dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+ descs_dma);
out:
/* try a smaller chunk size if we failed early */
if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
@@ -1953,16 +1950,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
/*
* CMBs can currently only exist on >=1.2 PCIe devices. We only
- * populate sysfs if a CMB is implemented. Note that we add the
- * CMB attribute to the nvme_ctrl kobj which removes the need to remove
- * it on exit. Since nvme_dev_attrs_group has no name we can pass
- * NULL as final argument to sysfs_add_file_to_group.
+ * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
+ * has no name we can pass NULL as final argument to
+ * sysfs_add_file_to_group.
*/
if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
dev->cmb = nvme_map_cmb(dev);
-
- if (dev->cmbsz) {
+ if (dev->cmb) {
if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL))
dev_warn(dev->ctrl.device,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index da04df1af231..a03299d77922 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -920,7 +920,11 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
int nr;
- nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+ /*
+ * Align the MR to a 4K page size to match the ctrl page size and
+ * the block virtual boundary.
+ */
+ nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
if (nr < count) {
if (nr < 0)
return nr;
@@ -1583,7 +1587,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
goto out_cleanup_queue;
ctrl->ctrl.max_hw_sectors =
- (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+ (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
error = nvme_init_identify(&ctrl->ctrl);
if (error)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2d7a98ab53fb..a53bb6635b83 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -199,12 +199,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
- memset(id->mn, ' ', sizeof(id->mn));
- strncpy((char *)id->mn, "Linux", sizeof(id->mn));
-
- memset(id->fr, ' ', sizeof(id->fr));
- strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
-
id->rab = 6;
/*
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 31ca55dfcb1d..309c84aa7595 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
struct kref ref;
};
+struct nvmet_fc_defer_fcp_req {
+ struct list_head req_list;
+ struct nvmefc_tgt_fcp_req *fcp_req;
+};
+
struct nvmet_fc_tgt_queue {
bool ninetypercent;
u16 qid;
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
struct nvmet_fc_tgt_assoc *assoc;
struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
struct list_head fod_list;
+ struct list_head pending_cmd_list;
+ struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
} __aligned(sizeof(unsigned long long));
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod);
/* *********************** FC-NVME DMA Handling **************************** */
@@ -385,7 +394,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
- static struct nvmet_fc_ls_iod *iod;
+ struct nvmet_fc_ls_iod *iod;
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
@@ -462,10 +471,10 @@ nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
- static struct nvmet_fc_fcp_iod *fod;
- unsigned long flags;
+ struct nvmet_fc_fcp_iod *fod;
+
+ lockdep_assert_held(&queue->qlock);
- spin_lock_irqsave(&queue->qlock, flags);
fod = list_first_entry_or_null(&queue->fod_list,
struct nvmet_fc_fcp_iod, fcp_list);
if (fod) {
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
* will "inherit" that reference.
*/
}
- spin_unlock_irqrestore(&queue->qlock, flags);
return fod;
}
static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue,
+ struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+ /*
+ * put all admin cmds on hw queue id 0. All io commands go to
+ * the respective hw queue based on a modulo basis
+ */
+ fcpreq->hwqid = queue->qid ?
+ ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+ if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+ queue_work_on(queue->cpu, queue->work_q, &fod->work);
+ else
+ nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
struct nvmet_fc_fcp_iod *fod)
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+ struct nvmet_fc_defer_fcp_req *deferfcp;
unsigned long flags;
fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
fcpreq->nvmet_fc_private = NULL;
- spin_lock_irqsave(&queue->qlock, flags);
- list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
fod->active = false;
fod->abort = false;
fod->aborted = false;
fod->writedataactive = false;
fod->fcpreq = NULL;
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (!deferfcp) {
+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ /* Release reference taken at queue lookup and fod allocation */
+ nvmet_fc_tgt_q_put(queue);
+ return;
+ }
+
+ /* Re-use the fod for the next pending cmd that was deferred */
+ list_del(&deferfcp->req_list);
+
+ fcpreq = deferfcp->fcp_req;
+
+ /* deferfcp can be reused for another IO at a later date */
+ list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
spin_unlock_irqrestore(&queue->qlock, flags);
+ /* Save NVME CMD IO in fod */
+ memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+ /* Setup new fcpreq to be processed */
+ fcpreq->rspaddr = NULL;
+ fcpreq->rsplen = 0;
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+ fod->active = true;
+
+ /* inform LLDD IO is now being processed */
+ tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+ /* Submit deferred IO for processing */
+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
/*
- * release the reference taken at queue lookup and fod allocation
+	 * Leave the queue lookup reference taken when the
+	 * fod was originally allocated.
*/
- nvmet_fc_tgt_q_put(queue);
-
- tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
}
static int
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
queue->port = assoc->tgtport->port;
queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
INIT_LIST_HEAD(&queue->fod_list);
+ INIT_LIST_HEAD(&queue->avail_defer_list);
+ INIT_LIST_HEAD(&queue->pending_cmd_list);
atomic_set(&queue->connected, 0);
atomic_set(&queue->sqtail, 0);
atomic_set(&queue->rsn, 1);
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
struct nvmet_fc_fcp_iod *fod = queue->fod;
+ struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
unsigned long flags;
int i, writedataactive;
bool disconnect;
@@ -666,6 +733,36 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
}
}
}
+
+ /* Cleanup defer'ed IOs in queue */
+ list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
+ req_list) {
+ list_del(&deferfcp->req_list);
+ kfree(deferfcp);
+ }
+
+ for (;;) {
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (!deferfcp)
+ break;
+
+ list_del(&deferfcp->req_list);
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ kfree(deferfcp);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ }
spin_unlock_irqrestore(&queue->qlock, flags);
flush_workqueue(queue->work_q);
@@ -2172,11 +2269,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
* Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
* layer for processing.
*
- * The nvmet-fc layer will copy cmd payload to an internal structure for
- * processing. As such, upon completion of the routine, the LLDD may
- * immediately free/reuse the CMD IU buffer passed in the call.
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
*
- * If this routine returns error, the lldd should abort the exchange.
+ * However, in some circumstances, because FC is packetized and the FC
+ * LLDD api may issue a hw command to send the response yet not see the
+ * hw completion for that command (and upcall the nvmet_fc layer) before
+ * a new command is asynchronously received, it's possible for a command
+ * to be received before the LLDD and nvmet_fc have recycled the job
+ * structure. This gives the appearance of more commands received than
+ * fit in the sq.
+ * To alleviate this scenario, a temporary queue is maintained in the
+ * transport for pending LLDD requests waiting for a queue job structure.
+ * In these "overrun" cases, a temporary queue element is allocated,
+ * the LLDD request and CMD IU buffer information is remembered, and the
+ * routine returns a -EOVERFLOW status. Subsequently, when a queue job
+ * structure is freed, it is immediately reallocated for anything on the
+ * pending request list. The LLDD's defer_rcv() callback is called,
+ * informing the LLDD that it may reuse the CMD IU buffer, and the io
+ * is then started normally with the transport.
+ *
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
*
* @target_port: pointer to the (registered) target port the FCP CMD IU
* was received on.
@@ -2194,6 +2318,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
struct nvmet_fc_tgt_queue *queue;
struct nvmet_fc_fcp_iod *fod;
+ struct nvmet_fc_defer_fcp_req *deferfcp;
+ unsigned long flags;
/* validate iu, so the connection id can be used to find the queue */
if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2214,29 +2340,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
* when the fod is freed.
*/
+ spin_lock_irqsave(&queue->qlock, flags);
+
fod = nvmet_fc_alloc_fcp_iod(queue);
- if (!fod) {
+ if (fod) {
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+
+ memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+ return 0;
+ }
+
+ if (!tgtport->ops->defer_rcv) {
+ spin_unlock_irqrestore(&queue->qlock, flags);
/* release the queue lookup reference */
nvmet_fc_tgt_q_put(queue);
return -ENOENT;
}
- fcpreq->nvmet_fc_private = fod;
- fod->fcpreq = fcpreq;
- /*
- * put all admin cmds on hw queue id 0. All io commands go to
- * the respective hw queue based on a modulo basis
- */
- fcpreq->hwqid = queue->qid ?
- ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
- memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+ deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (deferfcp) {
+ /* Just re-use one that was previously allocated */
+ list_del(&deferfcp->req_list);
+ } else {
+ spin_unlock_irqrestore(&queue->qlock, flags);
- if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
- queue_work_on(queue->cpu, queue->work_q, &fod->work);
- else
- nvmet_fc_handle_fcp_rqst(tgtport, fod);
+ /* Now we need to dynamically allocate one */
+ deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+ if (!deferfcp) {
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+ return -ENOMEM;
+ }
+ spin_lock_irqsave(&queue->qlock, flags);
+ }
- return 0;
+ /* For now, use rspaddr / rsplen to save payload information */
+ fcpreq->rspaddr = cmdiubuf;
+ fcpreq->rsplen = cmdiubuf_len;
+ deferfcp->fcp_req = fcpreq;
+
+ /* defer processing till a fod becomes available */
+ list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+ /* NOTE: the queue lookup reference is still valid */
+
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
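The -EOVERFLOW contract documented in the comment above implies a specific pattern on the LLDD side. Below is a minimal sketch of that pattern, assuming hypothetical my_lldd_* types and helpers; only nvmet_fc_rcv_fcp_req(), the defer_rcv() template op, and the nvmefc/nvmet_fc structures come from the code above. It is an editor's illustration, not part of the patch.

#include <linux/nvme-fc-driver.h>

/* Hypothetical LLDD-private port structure and helpers, for illustration. */
struct my_lldd_port {
	struct nvmet_fc_target_port *targetport;
};
void my_lldd_abort_exchange(struct my_lldd_port *lport,
			    struct nvmefc_tgt_fcp_req *fcpreq);
void my_lldd_release_cmd_buffer(struct nvmet_fc_target_port *tgtport,
				struct nvmefc_tgt_fcp_req *fcpreq);

static void my_lldd_recv_fcp_cmd(struct my_lldd_port *lport,
				 struct nvmefc_tgt_fcp_req *fcpreq,
				 void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret = nvmet_fc_rcv_fcp_req(lport->targetport, fcpreq,
				       cmdiubuf, cmdiubuf_len);

	switch (ret) {
	case 0:
		/* CMD IU was copied into a fod; the buffer may be reused. */
		break;
	case -EOVERFLOW:
		/*
		 * Accepted but deferred: keep cmdiubuf untouched until the
		 * transport's .defer_rcv callback runs for this fcpreq.
		 */
		break;
	default:
		/* Not accepted by the transport; abort the exchange. */
		my_lldd_abort_exchange(lport, fcpreq);
		break;
	}
}

/* .defer_rcv member of the LLDD's struct nvmet_fc_target_template */
static void my_lldd_defer_rcv(struct nvmet_fc_target_port *tgtport,
			      struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* The transport has now copied the CMD IU; recycle its buffer. */
	my_lldd_release_cmd_buffer(tgtport, fcpreq);
}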
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 28c38c756f92..e0a28ea341fe 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -89,6 +89,7 @@ int of_dma_configure(struct device *dev, struct device_node *np)
bool coherent;
unsigned long offset;
const struct iommu_ops *iommu;
+ u64 mask;
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
@@ -134,10 +135,9 @@ int of_dma_configure(struct device *dev, struct device_node *np)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- dev->coherent_dma_mask = min(dev->coherent_dma_mask,
- DMA_BIT_MASK(ilog2(dma_addr + size)));
- *dev->dma_mask = min((*dev->dma_mask),
- DMA_BIT_MASK(ilog2(dma_addr + size)));
+ mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
+ dev->coherent_dma_mask &= mask;
+ *dev->dma_mask &= mask;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
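A small worked example of the arithmetic behind the new mask expression above. This is an editor's illustration, not part of the patch; the dma_mask_example() wrapper and the specific numbers are invented.

#include <linux/kernel.h>	/* ilog2() */
#include <linux/dma-mapping.h>	/* DMA_BIT_MASK() */

static void dma_mask_example(void)
{
	u64 dma_addr = 0x80000000ULL;
	u64 size     = 0x40000000ULL;	/* last addressable byte: 0xBFFFFFFF */

	/*
	 * Old form: ilog2(0xC0000000) == 31, so the mask is 0x7fffffff and
	 * cannot reach the top of the range.
	 */
	u64 old_mask = DMA_BIT_MASK(ilog2(dma_addr + size));

	/*
	 * New form: ilog2(0xBFFFFFFF) + 1 == 32, so the mask is 0xffffffff
	 * and covers every byte the device may actually address.
	 */
	u64 new_mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);

	(void)old_mask;
	(void)new_mask;
}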
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 5c63b920b471..ed92c1254cff 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev)
dino_dev->hba.dev = dev;
dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
- dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
+ dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
spin_lock_init(&dino_dev->dinosaur_pen);
dino_dev->hba.iommu = ccio_get_iommu(dev);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 253d92409bb3..2225afc1cbbb 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -538,12 +538,9 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
struct msi_desc *entry;
u16 control;
- if (affd) {
+ if (affd)
masks = irq_create_affinity_masks(nvec, affd);
- if (!masks)
- dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n",
- nvec);
- }
+
/* MSI Entry Initialization */
entry = alloc_msi_entry(&dev->dev, nvec, masks);
@@ -679,12 +676,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msi_desc *entry;
int ret, i;
- if (affd) {
+ if (affd)
masks = irq_create_affinity_masks(nvec, affd);
- if (!masks)
- dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n",
- nvec);
- }
for (i = 0, curmsk = masks; i < nvec; i++) {
entry = alloc_msi_entry(&dev->dev, 1, curmsk);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e70c1c7ba1bf..a8da543b3814 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -573,7 +573,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
if (acpi_pm_device_can_wakeup(&bus->self->dev))
- return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
+ return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);
bus = bus->parent;
}
@@ -581,7 +581,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
/* We have reached the root bus. */
if (bus->bridge) {
if (acpi_pm_device_can_wakeup(bus->bridge))
- return acpi_pm_set_device_wakeup(bus->bridge, enable);
+ return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
}
return 0;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d51e8738f9c2..e426f8b44c92 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -647,9 +647,7 @@ static int pci_legacy_resume(struct device *dev)
static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
pci_fixup_device(pci_fixup_resume, pci_dev);
-
- if (!pci_has_subordinate(pci_dev))
- pci_enable_wake(pci_dev, PCI_D0, false);
+ pci_enable_wake(pci_dev, PCI_D0, false);
}
static void pci_pm_default_suspend(struct pci_dev *pci_dev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index af0cc3456dc1..68e3b2b0da93 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -514,7 +514,7 @@ EXPORT_SYMBOL(pci_find_resource);
*/
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
- struct pci_dev *bridge, *highest_pcie_bridge = NULL;
+ struct pci_dev *bridge, *highest_pcie_bridge = dev;
bridge = pci_upstream_bridge(dev);
while (bridge && pci_is_pcie(bridge)) {
@@ -1912,6 +1912,13 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
int ret = 0;
+ /*
+ * Bridges can only signal wakeup on behalf of subordinate devices,
+ * but that is set up elsewhere, so skip them.
+ */
+ if (pci_has_subordinate(dev))
+ return 0;
+
/* Don't do the same thing twice in a row for one device. */
if (!!enable == !!dev->wakeup_prepared)
return 0;
@@ -4260,6 +4267,41 @@ int pci_reset_function(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_reset_function);
/**
+ * pci_reset_function_locked - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device. This function differs
+ * from __pci_reset_function() in that it saves and restores device state
+ * over the reset. It also differs from pci_reset_function() in that it
+ * requires the PCI device lock to be held.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function_locked(struct pci_dev *dev)
+{
+ int rc;
+
+ rc = pci_probe_reset_function(dev);
+ if (rc)
+ return rc;
+
+ pci_dev_save_and_disable(dev);
+
+ rc = __pci_reset_function_locked(dev);
+
+ pci_dev_restore(dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function_locked);
+
+/**
* pci_try_reset_function - quiesce and reset a PCI device function
* @dev: PCI device to reset
*
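A brief usage sketch for the new export (editor's illustration, not part of the patch; my_pci_probe() is a hypothetical driver probe routine). pci_reset_function_locked() is intended for contexts where the PCI device lock is already held, such as .probe(), where pci_reset_function() would attempt to take the lock again.

#include <linux/pci.h>

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	/*
	 * The driver core holds the device lock across .probe(), which is
	 * what the kerneldoc above requires, so use the _locked variant.
	 */
	rc = pci_reset_function_locked(pdev);
	if (rc)
		dev_warn(&pdev->dev, "function reset failed: %d\n", rc);

	/* ... continue with normal probe work ... */
	return 0;
}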
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c31310db0404..e6a917b4acd3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1762,6 +1762,48 @@ static void pci_configure_extended_tags(struct pci_dev *dev)
PCI_EXP_DEVCTL_EXT_TAG);
}
+/**
+ * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
+ * @dev: PCI device to query
+ *
+ * Returns true if the device has enabled relaxed ordering attribute.
+ */
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
+{
+ u16 v;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
+
+ return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
+}
+EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
+
+static void pci_configure_relaxed_ordering(struct pci_dev *dev)
+{
+ struct pci_dev *root;
+
+ /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
+ if (dev->is_virtfn)
+ return;
+
+ if (!pcie_relaxed_ordering_enabled(dev))
+ return;
+
+ /*
+ * For now, we only deal with Relaxed Ordering issues with Root
+ * Ports. Peer-to-Peer DMA is another can of worms.
+ */
+ root = pci_find_pcie_root_port(dev);
+ if (!root)
+ return;
+
+ if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
+ pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_RELAX_EN);
+ dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
+ }
+}
+
static void pci_configure_device(struct pci_dev *dev)
{
struct hotplug_params hpp;
@@ -1769,6 +1811,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_mps(dev);
pci_configure_extended_tags(dev);
+ pci_configure_relaxed_ordering(dev);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
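[Illustrative aside, not part of the patch above] pcie_relaxed_ordering_enabled() is exported, and the quirk comments below ask device drivers to check before sending TLPs with the Relaxed Ordering attribute set, so a hedged sketch of such a driver-side check (the adapter structure and field are invented) might be:

/* Hypothetical driver-side check; struct my_adapter exists only for this sketch. */
#include <linux/pci.h>

struct my_adapter {
	struct pci_dev *pdev;
	bool use_relaxed_ordering;
};

static void my_adapter_init_dma_attrs(struct my_adapter *adap)
{
	/*
	 * Only request Relaxed Ordering in our DMA descriptors if
	 * PCI_EXP_DEVCTL_RELAX_EN survived pci_configure_relaxed_ordering()
	 * and the Root Port quirks declared further down.
	 */
	adap->use_relaxed_ordering = pcie_relaxed_ordering_enabled(adap->pdev);
}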
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6967c6b4cf6b..a346487a9532 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -25,6 +25,7 @@
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/mm.h>
+#include <linux/platform_data/x86/apple.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -3447,7 +3448,7 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
{
acpi_handle bridge, SXIO, SXFP, SXLV;
- if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+ if (!x86_apple_machine)
return;
if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
return;
@@ -3492,7 +3493,7 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
struct pci_dev *sibling = NULL;
struct pci_dev *nhi = NULL;
- if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+ if (!x86_apple_machine)
return;
if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
return;
@@ -4016,6 +4017,95 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
/*
+ * Some devices have problems with Transaction Layer Packets with the Relaxed
+ * Ordering Attribute set. Such devices should mark themselves and other
+ * Device Drivers should check before sending TLPs with RO set.
+ */
+static void quirk_relaxedordering_disable(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
+ dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
+}
+
+/*
+ * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
+ * Complex has a Flow Control Credit issue which can cause performance
+ * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+
+/*
+ * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
+ * where Upstream Transaction Layer Packets with the Relaxed Ordering
+ * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
+ * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules
+ * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0
+ * November 10, 2010). As a result, on this platform we can't use Relaxed
+ * Ordering for Upstream TLPs.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+
+/*
* Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
* values for the Attribute as were supplied in the header of the
* corresponding Request, except as explicitly allowed when IDO is used."
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 20f1b4493994..04e929fd0ffe 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1548,6 +1548,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
},
},
{
+ .ident = "HP Chromebook 11 G5 (Setzer)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+ },
+ },
+ {
.ident = "Acer Chromebook R11 (Cyan)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 4d4ef42a39b5..86c4b3fab7b0 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
-static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 };
-static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 };
-static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 };
+static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
+static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
+static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
static const unsigned int mrfld_pwm0_pins[] = { 144 };
static const unsigned int mrfld_pwm1_pins[] = { 145 };
static const unsigned int mrfld_pwm2_pins[] = { 132 };
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index f024e25787fc..0c6d7812d6fd 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -37,7 +37,7 @@
#define IRQ_STATUS 0x10
#define IRQ_WKUP 0x18
-#define NB_FUNCS 2
+#define NB_FUNCS 3
#define GPIO_PER_REG 32
/**
@@ -126,6 +126,16 @@ struct armada_37xx_pinctrl {
.funcs = {_func1, "gpio"} \
}
+#define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \
+ { \
+ .name = _name, \
+ .start_pin = _start, \
+ .npins = _nr, \
+ .reg_mask = _mask, \
+ .val = {_v1, _v2, _v3}, \
+ .funcs = {_f1, _f2, "gpio"} \
+ }
+
#define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \
_f1, _f2) \
{ \
@@ -171,12 +181,13 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
- PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"),
+ PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
- PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"),
+ PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
+ "mii", "mii_err"),
};
const struct armada_37xx_pin_data armada_37xx_pin_nb = {
@@ -187,7 +198,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = {
};
const struct armada_37xx_pin_data armada_37xx_pin_sb = {
- .nr_pins = 29,
+ .nr_pins = 30,
.name = "GPIO2",
.groups = armada_37xx_sb_groups,
.ngroups = ARRAY_SIZE(armada_37xx_sb_groups),
@@ -208,7 +219,7 @@ static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp,
{
int f;
- for (f = 0; f < NB_FUNCS; f++)
+ for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++)
if (!strcmp(grp->funcs[f], func))
return f;
@@ -795,7 +806,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
for (j = 0; j < grp->extra_npins; j++)
grp->pins[i+j] = grp->extra_pin + j;
- for (f = 0; f < NB_FUNCS; f++) {
+ for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) {
int ret;
/* check for unique functions and count groups */
ret = armada_37xx_add_function(info->funcs, &funcsize,
@@ -847,7 +858,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
struct armada_37xx_pin_group *gp = &info->groups[g];
int f;
- for (f = 0; f < NB_FUNCS; f++) {
+ for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) {
if (strcmp(gp->funcs[f], name) == 0) {
*groups = gp->name;
groups++;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 159580c04b14..47a392bc73c8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -918,6 +918,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */
PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */
+ SUNXI_FUNCTION(0x5, "sim"), /* DET */
SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */
SUNXI_FUNCTION(0x7, "csi1")), /* D16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index a433a306a2d0..c75e094b2d90 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -1084,7 +1084,7 @@ static const unsigned usb1_pins[] = {182, 183};
static const int usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {184, 185};
static const int usb2_muxvals[] = {0, 0};
-static const unsigned usb3_pins[] = {186, 187};
+static const unsigned usb3_pins[] = {187, 188};
static const int usb3_muxvals[] = {0, 0};
static const unsigned port_range0_pins[] = {
300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 787e3967bd5c..f828ee340a98 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -64,10 +64,8 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
struct zx_pinctrl_soc_info *info = zpctl->info;
const struct pinctrl_pin_desc *pindesc = info->pins + group_selector;
struct zx_pin_data *data = pindesc->drv_data;
- struct zx_mux_desc *mux = data->muxes;
- u32 mask = (1 << data->width) - 1;
- u32 offset = data->offset;
- u32 bitpos = data->bitpos;
+ struct zx_mux_desc *mux;
+ u32 mask, offset, bitpos;
struct function_desc *func;
unsigned long flags;
u32 val, mval;
@@ -76,6 +74,11 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
if (!data)
return -EINVAL;
+ mux = data->muxes;
+ mask = (1 << data->width) - 1;
+ offset = data->offset;
+ bitpos = data->bitpos;
+
func = pinmux_generic_get_function(pctldev, func_selector);
if (!func)
return -EINVAL;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index b77435783ef3..7eacc1c4b3b1 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <uapi/linux/sched/types.h>
#include "ptp_private.h"
@@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
kfree(ptp);
}
+static void ptp_aux_kworker(struct kthread_work *work)
+{
+ struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+ aux_work.work);
+ struct ptp_clock_info *info = ptp->info;
+ long delay;
+
+ delay = info->do_aux_work(info);
+
+ if (delay >= 0)
+ kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+
/* public interface */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
mutex_init(&ptp->pincfg_mux);
init_waitqueue_head(&ptp->tsev_wq);
+ if (ptp->info->do_aux_work) {
+ char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
+
+ kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+ ptp->kworker = kthread_create_worker(0, worker_name ?
+ worker_name : info->name);
+ kfree(worker_name);
+ if (IS_ERR(ptp->kworker)) {
+ err = PTR_ERR(ptp->kworker);
+ pr_err("failed to create ptp aux_worker %d\n", err);
+ goto kworker_err;
+ }
+ }
+
err = ptp_populate_pin_groups(ptp);
if (err)
goto no_pin_groups;
@@ -259,6 +287,9 @@ no_pps:
no_device:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
+ if (ptp->kworker)
+ kthread_destroy_worker(ptp->kworker);
+kworker_err:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, index);
@@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
+ if (ptp->kworker) {
+ kthread_cancel_delayed_work_sync(&ptp->aux_work);
+ kthread_destroy_worker(ptp->kworker);
+ }
+
/* Release the clock's resources. */
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
@@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
}
EXPORT_SYMBOL(ptp_find_pin);
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
+{
+ return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+EXPORT_SYMBOL(ptp_schedule_worker);
+
/* module operations */
static void __exit ptp_exit(void)
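[Illustrative aside, not part of the patch above] A clock driver opts into the new aux worker by filling in a do_aux_work callback in its ptp_clock_info before ptp_clock_register(), and can kick the worker on demand with ptp_schedule_worker(); a minimal hypothetical fragment (driver name and polling interval invented, other mandatory callbacks omitted) follows:

/* Hypothetical PTP driver fragment using the aux worker added above. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

static long my_ptp_do_aux_work(struct ptp_clock_info *info)
{
	/* Poll the hardware here, e.g. drain a timestamp FIFO. */

	/* Re-run in roughly 100 ms; a negative return stops rescheduling. */
	return msecs_to_jiffies(100);
}

static struct ptp_clock_info my_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "my_ptp",
	.do_aux_work	= my_ptp_do_aux_work,
	/* adjfreq/adjtime/gettime64/settime64/enable omitted in this sketch */
};

/* From an interrupt or event handler, once ptp_clock_register() succeeded: */
static void my_ptp_kick(struct ptp_clock *clock)
{
	ptp_schedule_worker(clock, 0);	/* run the aux worker immediately */
}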
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index d95888974d0c..b86f1bfecd6f 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -22,6 +22,7 @@
#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/posix-clock.h>
#include <linux/ptp_clock.h>
@@ -56,6 +57,8 @@ struct ptp_clock {
struct attribute_group pin_attr_group;
/* 1st entry is a pointer to the real group, 2nd is NULL terminator */
const struct attribute_group *pin_attr_groups[2];
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work aux_work;
};
/*
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4fac49e55d47..4b43aa62fbc7 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1301,7 +1301,6 @@ static void ds1307_clks_register(struct ds1307 *ds1307)
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0x12,
};
static int ds1307_probe(struct i2c_client *client,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index ba6ac83a6c25..5ccfdc80d0ec 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -481,7 +481,7 @@ static int ccwchain_fetch_tic(struct ccwchain *chain,
ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);
if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
- ccw->cda = (__u32) (addr_t) (iter->ch_ccw +
+ ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
(ccw->cda - ccw_head));
return 0;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8975cd321390..d42e758518ed 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rtable *rt = (struct rtable *) dst;
__be32 *pkey = &ip_hdr(skb)->daddr;
- if (rt->rt_gateway)
+ if (rt && rt->rt_gateway)
pkey = &rt->rt_gateway;
/* IPv4 */
@@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rt6_info *rt = (struct rt6_info *) dst;
struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
- if (!ipv6_addr_any(&rt->rt6i_gateway))
+ if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
pkey = &rt->rt6i_gateway;
/* IPv6 */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index f4538d7a3016..d145e0d90227 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -47,6 +47,17 @@ config SCSI_NETLINK
default n
depends on NET
+config SCSI_MQ_DEFAULT
+ bool "SCSI: use blk-mq I/O path by default"
+ depends on SCSI
+ ---help---
+ This option enables the new blk-mq based I/O path for SCSI
+ devices by default. With the option the scsi_mod.use_blk_mq
+ module/boot option defaults to Y, without it to N, but it can
+ still be overridden either way.
+
+ If unsure say N.
+
config SCSI_PROC_FS
bool "legacy /proc/scsi/ support"
depends on SCSI && PROC_FS
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 707ee2f5954d..a1a2c71e1626 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -549,7 +549,9 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
if ((le32_to_cpu(get_name_reply->status) == CT_OK)
&& (get_name_reply->data[0] != '\0')) {
char *sp = get_name_reply->data;
- sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
+ int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
+ sp[data_size - 1] = '\0';
while (*sp == ' ')
++sp;
if (*sp) {
@@ -579,12 +581,15 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
static int aac_get_container_name(struct scsi_cmnd * scsicmd)
{
int status;
+ int data_size;
struct aac_get_name *dinfo;
struct fib * cmd_fibcontext;
struct aac_dev * dev;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+ data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
aac_fib_init(cmd_fibcontext);
@@ -593,7 +598,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_READ_NAME);
dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
- dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
+ dinfo->count = cpu_to_le32(data_size - 1);
status = aac_fib_send(ContainerCommand,
cmd_fibcontext,
@@ -3198,10 +3203,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
return -EBUSY;
if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
return -EFAULT;
- if (qd.cnum == -1)
+ if (qd.cnum == -1) {
+ if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
+ return -EINVAL;
qd.cnum = qd.id;
- else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
- {
+ } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
return -EINVAL;
qd.instance = dev->scsi_host_ptr->host_no;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d31a9bc2ba69..ee2667e20e42 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2274,7 +2274,7 @@ struct aac_get_name_resp {
__le32 parm3;
__le32 parm4;
__le32 parm5;
- u8 data[16];
+ u8 data[17];
};
#define CT_CID_TO_32BITS_UID 165
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7dfe709a7138..6844ba361616 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = {
};
/**
- * bnx2fc_percpu_thread_create - Create a receive thread for an
- * online CPU
+ * bnx2fc_cpu_online - Create a receive thread for an online CPU
*
* @cpu: cpu index for the online cpu
*/
-static void bnx2fc_percpu_thread_create(unsigned int cpu)
+static int bnx2fc_cpu_online(unsigned int cpu)
{
struct bnx2fc_percpu_s *p;
struct task_struct *thread;
@@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
(void *)p, cpu_to_node(cpu),
"bnx2fc_thread/%d", cpu);
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
+
/* bind thread to the cpu */
- if (likely(!IS_ERR(thread))) {
- kthread_bind(thread, cpu);
- p->iothread = thread;
- wake_up_process(thread);
- }
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ return 0;
}
-static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+static int bnx2fc_cpu_offline(unsigned int cpu)
{
struct bnx2fc_percpu_s *p;
struct task_struct *thread;
@@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
thread = p->iothread;
p->iothread = NULL;
-
/* Free all work in the list */
list_for_each_entry_safe(work, tmp, &p->work_list, list) {
list_del_init(&work->list);
@@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
if (thread)
kthread_stop(thread);
-}
-
-
-static int bnx2fc_cpu_online(unsigned int cpu)
-{
- printk(PFX "CPU %x online: Create Rx thread\n", cpu);
- bnx2fc_percpu_thread_create(cpu);
- return 0;
-}
-
-static int bnx2fc_cpu_dead(unsigned int cpu)
-{
- printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
- bnx2fc_percpu_thread_destroy(cpu);
return 0;
}
@@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void)
spin_lock_init(&p->fp_work_lock);
}
- get_online_cpus();
-
- for_each_online_cpu(cpu)
- bnx2fc_percpu_thread_create(cpu);
-
- rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "scsi/bnx2fc:online",
- bnx2fc_cpu_online, NULL);
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online",
+ bnx2fc_cpu_online, bnx2fc_cpu_offline);
if (rc < 0)
- goto stop_threads;
+ goto stop_thread;
bnx2fc_online_state = rc;
- cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
- NULL, bnx2fc_cpu_dead);
- put_online_cpus();
-
cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
-
return 0;
-stop_threads:
- for_each_online_cpu(cpu)
- bnx2fc_percpu_thread_destroy(cpu);
- put_online_cpus();
+stop_thread:
kthread_stop(l2_thread);
free_wq:
destroy_workqueue(bnx2fc_wq);
@@ -2803,7 +2775,6 @@ static void __exit bnx2fc_mod_exit(void)
struct fcoe_percpu_s *bg;
struct task_struct *l2_thread;
struct sk_buff *skb;
- unsigned int cpu = 0;
/*
* NOTE: Since cnic calls register_driver routine rtnl_lock,
@@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void)
if (l2_thread)
kthread_stop(l2_thread);
- get_online_cpus();
- /* Destroy per cpu threads */
- for_each_online_cpu(cpu) {
- bnx2fc_percpu_thread_destroy(cpu);
- }
-
- cpuhp_remove_state_nocalls(bnx2fc_online_state);
- cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
-
- put_online_cpus();
+ cpuhp_remove_state(bnx2fc_online_state);
destroy_workqueue(bnx2fc_wq);
/*
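[Illustrative aside, not part of the patch above] Both the bnx2fc and bnx2i conversions in this series follow the same pattern: drop the get_online_cpus()/for_each_online_cpu() bring-up loops and the separate *_nocalls/DEAD states, and let cpuhp_setup_state() drive paired online/offline callbacks instead. A minimal hypothetical module sketch of that pattern (all names invented):

/* Hypothetical module showing the cpuhp_setup_state() pattern used above. */
#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state my_online_state;

static int my_cpu_online(unsigned int cpu)
{
	/* Allocate and start the per-CPU resource for @cpu. */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* Stop and tear down the per-CPU resource for @cpu. */
	return 0;
}

static int __init my_mod_init(void)
{
	int ret;

	/*
	 * cpuhp_setup_state() runs the online callback for CPUs that are
	 * already up, so no explicit bring-up loop is needed, and the
	 * offline callback runs automatically on hot-unplug.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "my_driver:online",
				my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;
	my_online_state = ret;
	return 0;
}

static void __exit my_mod_exit(void)
{
	cpuhp_remove_state(my_online_state);
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");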
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 913c750205ce..26de61d65a4d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
return work;
}
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+ unsigned int cpu = wqe % num_possible_cpus();
+ struct bnx2fc_percpu_s *fps;
+ struct bnx2fc_work *work;
+
+ fps = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&fps->fp_work_lock);
+ if (fps->iothread) {
+ work = bnx2fc_alloc_work(tgt, wqe);
+ if (work) {
+ list_add_tail(&work->list, &fps->work_list);
+ wake_up_process(fps->iothread);
+ spin_unlock_bh(&fps->fp_work_lock);
+ return;
+ }
+ }
+ spin_unlock_bh(&fps->fp_work_lock);
+ bnx2fc_process_cq_compl(tgt, wqe);
+}
+
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
/* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe);
} else {
- /* Pending work request completion */
- struct bnx2fc_work *work = NULL;
- struct bnx2fc_percpu_s *fps = NULL;
- unsigned int cpu = wqe % num_possible_cpus();
-
- fps = &per_cpu(bnx2fc_percpu, cpu);
- spin_lock_bh(&fps->fp_work_lock);
- if (unlikely(!fps->iothread))
- goto unlock;
-
- work = bnx2fc_alloc_work(tgt, wqe);
- if (work)
- list_add_tail(&work->list,
- &fps->work_list);
-unlock:
- spin_unlock_bh(&fps->fp_work_lock);
-
- /* Pending work request completion */
- if (fps->iothread && work)
- wake_up_process(fps->iothread);
- else
- bnx2fc_process_cq_compl(tgt, wqe);
+ bnx2fc_pending_work(tgt, wqe);
num_free_sqes++;
}
cqe++;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 86afc002814c..4ebcda8d9500 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle)
/**
- * bnx2i_percpu_thread_create - Create a receive thread for an
- * online CPU
+ * bnx2i_cpu_online - Create a receive thread for an online CPU
*
* @cpu: cpu index for the online cpu
*/
-static void bnx2i_percpu_thread_create(unsigned int cpu)
+static int bnx2i_cpu_online(unsigned int cpu)
{
struct bnx2i_percpu_s *p;
struct task_struct *thread;
@@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
cpu_to_node(cpu),
"bnx2i_thread/%d", cpu);
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
+
/* bind thread to the cpu */
- if (likely(!IS_ERR(thread))) {
- kthread_bind(thread, cpu);
- p->iothread = thread;
- wake_up_process(thread);
- }
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ return 0;
}
-
-static void bnx2i_percpu_thread_destroy(unsigned int cpu)
+static int bnx2i_cpu_offline(unsigned int cpu)
{
struct bnx2i_percpu_s *p;
struct task_struct *thread;
@@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
spin_unlock_bh(&p->p_work_lock);
if (thread)
kthread_stop(thread);
-}
-
-static int bnx2i_cpu_online(unsigned int cpu)
-{
- pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
- bnx2i_percpu_thread_create(cpu);
- return 0;
-}
-
-static int bnx2i_cpu_dead(unsigned int cpu)
-{
- pr_info("CPU %x offline: Remove Rx thread\n", cpu);
- bnx2i_percpu_thread_destroy(cpu);
return 0;
}
@@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void)
p->iothread = NULL;
}
- get_online_cpus();
-
- for_each_online_cpu(cpu)
- bnx2i_percpu_thread_create(cpu);
-
- err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "scsi/bnx2i:online",
- bnx2i_cpu_online, NULL);
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online",
+ bnx2i_cpu_online, bnx2i_cpu_offline);
if (err < 0)
- goto remove_threads;
+ goto unreg_driver;
bnx2i_online_state = err;
-
- cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
- NULL, bnx2i_cpu_dead);
- put_online_cpus();
return 0;
-remove_threads:
- for_each_online_cpu(cpu)
- bnx2i_percpu_thread_destroy(cpu);
- put_online_cpus();
+unreg_driver:
cnic_unregister_driver(CNIC_ULP_ISCSI);
unreg_xport:
iscsi_unregister_transport(&bnx2i_iscsi_transport);
@@ -551,7 +525,6 @@ out:
static void __exit bnx2i_mod_exit(void)
{
struct bnx2i_hba *hba;
- unsigned cpu = 0;
mutex_lock(&bnx2i_dev_lock);
while (!list_empty(&adapter_list)) {
@@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void)
}
mutex_unlock(&bnx2i_dev_lock);
- get_online_cpus();
-
- for_each_online_cpu(cpu)
- bnx2i_percpu_thread_destroy(cpu);
-
- cpuhp_remove_state_nocalls(bnx2i_online_state);
- cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
- put_online_cpus();
+ cpuhp_remove_state(bnx2i_online_state);
iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI);
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 2029ad225121..5be0086142ca 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3845,8 +3845,10 @@ csio_hw_start(struct csio_hw *hw)
if (csio_is_hw_ready(hw))
return 0;
- else
+ else if (csio_match_state(hw, csio_hws_uninit))
return -EINVAL;
+ else
+ return -ENODEV;
}
int
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index ea0c31086cc6..dcd074169aa9 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -969,10 +969,14 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hw);
- if (csio_hw_start(hw) != 0) {
- dev_err(&pdev->dev,
- "Failed to start FW, continuing in debug mode.\n");
- return 0;
+ rv = csio_hw_start(hw);
+ if (rv) {
+ if (rv == -EINVAL) {
+ dev_err(&pdev->dev,
+ "Failed to start FW, continuing in debug mode.\n");
+ return 0;
+ }
+ goto err_lnode_exit;
}
sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index a69a9ac836f5..1d02cf9fe06c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1635,6 +1635,9 @@ static int init_act_open(struct cxgbi_sock *csk)
goto rel_resource;
}
+ if (!(n->nud_state & NUD_VALID))
+ neigh_event_send(n, NULL);
+
csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
if (csk->atid < 0) {
pr_err("%s, NO atid available.\n", ndev->name);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b0c68d24db01..f838bd73befa 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3351,6 +3351,16 @@ static void ipr_worker_thread(struct work_struct *work)
return;
}
+ if (ioa_cfg->scsi_unblock) {
+ ioa_cfg->scsi_unblock = 0;
+ ioa_cfg->scsi_blocked = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->scsi_blocked)
+ scsi_block_requests(ioa_cfg->host);
+ }
+
if (!ioa_cfg->scan_enabled) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
@@ -4935,6 +4945,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
}
if (ipr_is_vset_device(res)) {
sdev->scsi_level = SCSI_SPC_3;
+ sdev->no_report_opcodes = 1;
blk_queue_rq_timeout(sdev->request_queue,
IPR_VSET_RW_TIMEOUT);
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
@@ -7211,9 +7222,8 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
ENTER;
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
ipr_trace;
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ ioa_cfg->scsi_unblock = 1;
+ schedule_work(&ioa_cfg->work_q);
}
ioa_cfg->in_reset_reload = 0;
@@ -7287,13 +7297,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
- spin_unlock(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock(ioa_cfg->host->host_lock);
-
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
- scsi_block_requests(ioa_cfg->host);
-
+ ioa_cfg->scsi_unblock = 1;
schedule_work(&ioa_cfg->work_q);
LEAVE;
return IPR_RC_JOB_RETURN;
@@ -9249,8 +9253,11 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
spin_unlock(&ioa_cfg->hrrq[i]._lock);
}
wmb();
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ ioa_cfg->scsi_unblock = 0;
+ ioa_cfg->scsi_blocked = 1;
scsi_block_requests(ioa_cfg->host);
+ }
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg->reset_cmd = ipr_cmd;
@@ -9306,9 +9313,8 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
wake_up_all(&ioa_cfg->reset_wait_q);
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ ioa_cfg->scsi_unblock = 1;
+ schedule_work(&ioa_cfg->work_q);
}
return;
} else {
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index e98a87a65335..c7f0e9e3cd7d 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1488,6 +1488,8 @@ struct ipr_ioa_cfg {
u8 cfg_locked:1;
u8 clear_isr:1;
u8 probe_done:1;
+ u8 scsi_unblock:1;
+ u8 scsi_blocked:1;
u8 revid;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ed48ed38e79..7ee1a94c0b33 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP: Rcv %08x Release %08x Drop %08x\n",
+ "FCP: Rcv %08x Defer %08x Release %08x "
+ "Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_defer),
atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5cc8b0f7d885..744f3f395b64 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf + len, size - len,
- "FCP: Rcv %08x Drop %08x\n",
+ "FCP: Rcv %08x Defer %08x Release %08x "
+ "Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_defer),
+ atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fbeec344c6cc..bbbd0f84160d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *rsp)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_nvmet_rcv_ctx *ctxp =
+ container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+ struct lpfc_hba *phba = ctxp->phba;
+
+ lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+ ctxp->oxid, ctxp->size, smp_processor_id());
+
+ tgtp = phba->targetport->private;
+ atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
static struct nvmet_fc_target_template lpfc_tgttemplate = {
.targetport_delete = lpfc_nvmet_targetport_delete,
.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
.fcp_op = lpfc_nvmet_xmt_fcp_op,
.fcp_abort = lpfc_nvmet_xmt_fcp_abort,
.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+ .defer_rcv = lpfc_nvmet_defer_rcv,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
return;
}
+ /* Processing of FCP command is deferred */
+ if (rc == -EOVERFLOW) {
+ lpfc_nvmeio_data(phba,
+ "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+ /* defer reposting rcv buffer till .defer_rcv callback */
+ ctxp->rqb_buffer = nvmebuf;
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
+
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index e675ef17be08..48a76788b003 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
atomic_t rcv_fcp_cmd_in;
atomic_t rcv_fcp_cmd_out;
atomic_t rcv_fcp_cmd_drop;
+ atomic_t rcv_fcp_cmd_defer;
atomic_t xmt_fcp_release;
/* Stats counters - lpfc_nvmet_xmt_fcp_op */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 316c3df0c3fd..71c4746341ea 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6228,8 +6228,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
fail_start_aen:
fail_io_attach:
megasas_mgmt_info.count--;
- megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
megasas_mgmt_info.max_index--;
+ megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 4d038926a455..351f06dfc5a0 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -528,7 +528,8 @@ struct fip_vlan {
#define QEDF_WRITE (1 << 0)
#define MAX_FIBRE_LUNS 0xffffffff
-#define QEDF_MAX_NUM_CQS 8
+#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
+ num_online_cpus())
/*
* PCI function probe defines
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index eb07f1de8afa..59c18ca4cda9 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -489,7 +489,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
/* If a SRR times out, simply free resources */
if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
- goto out_free;
+ goto out_put;
/* Normalize response data into struct fc_frame */
mp_req = &(srr_req->mp_req);
@@ -501,7 +501,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
if (!fp) {
QEDF_ERR(&(qedf->dbg_ctx),
"fc_frame_alloc failure.\n");
- goto out_free;
+ goto out_put;
}
/* Copy frame header from firmware into fp */
@@ -526,9 +526,10 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
}
fc_frame_free(fp);
-out_free:
+out_put:
/* Put reference for original command since SRR completed */
kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
kfree(cb_arg);
}
@@ -780,7 +781,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
/* If a REC times out, free resources */
if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
- goto out_free;
+ goto out_put;
/* Normalize response data into struct fc_frame */
mp_req = &(rec_req->mp_req);
@@ -792,7 +793,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
if (!fp) {
QEDF_ERR(&(qedf->dbg_ctx),
"fc_frame_alloc failure.\n");
- goto out_free;
+ goto out_put;
}
/* Copy frame header from firmware into fp */
@@ -884,9 +885,10 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
out_free_frame:
fc_frame_free(fp);
-out_free:
+out_put:
/* Put reference for original command since REC completed */
kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
kfree(cb_arg);
}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7786c97e033f..1d13c9ca517d 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
* we allocation is the minimum off:
*
* Number of CPUs
- * Number of MSI-X vectors
- * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
+ * Number allocated by qed for our PCI function
*/
- qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,
- num_online_cpus());
+ qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
qedf->num_queues);
@@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
goto err1;
}
+ /* Learn information crucial for qedf to progress */
+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
+ goto err1;
+ }
+
/* queue allocation code should come here
* order should be
* slowpath_start
@@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
- /* Learn information crucial for qedf to progress */
- rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
- if (rc) {
- QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
- goto err1;
- }
-
/* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 33142610882f..b18646d6057f 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -401,9 +401,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
- if (!test_bit(i, vha->hw->req_qid_map))
- continue;
-
if (req || !buf) {
length = req ?
req->length : REQUEST_ENTRY_CNT_24XX;
@@ -418,9 +415,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
- if (!test_bit(i, vha->hw->rsp_qid_map))
- continue;
-
if (rsp || !buf) {
length = rsp ?
rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -660,9 +654,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
- if (!test_bit(i, vha->hw->req_qid_map))
- continue;
-
if (req || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
@@ -675,9 +666,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
- if (!test_bit(i, vha->hw->rsp_qid_map))
- continue;
-
if (rsp || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index b20da0d27ad7..3f82ea1b72dc 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
- unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
@@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
cmd->cmd_in_wq = 0;
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- cmd->data_work = 1;
- if (cmd->aborted) {
- cmd->data_work_free = 1;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
- tcm_qla2xxx_free_cmd(cmd);
- return;
- }
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
@@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
qlt_xmit_tm_rsp(mcmd);
}
-#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- unsigned long flags;
if (qlt_abort_cmd(cmd))
return;
-
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if ((cmd->state == QLA_TGT_STATE_NEW)||
- ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
- DATA_WORK_NOT_FREE(cmd))) {
- cmd->data_work_free = 1;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- /*
- * cmd has not reached fw, Use this trigger to free it.
- */
- tcm_qla2xxx_free_cmd(cmd);
- return;
- }
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- return;
-
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3d38c6d463b8..1bf274e3b2b6 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -800,7 +800,11 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
+#ifdef CONFIG_SCSI_MQ_DEFAULT
bool scsi_use_blk_mq = true;
+#else
+bool scsi_use_blk_mq = false;
+#endif
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
static int __init init_scsi(void)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bea36adeee17..e2647f2d4430 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1277,6 +1277,9 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
+ if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
+ sd_zbc_write_unlock_zone(SCpnt);
+
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
__free_page(rq->special_vec.bv_page);
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 96855df9f49d..8aa54779aac1 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -294,6 +294,9 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
test_and_set_bit(zno, sdkp->zones_wlock))
return BLKPREP_DEFER;
+ WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK);
+ cmd->flags |= SCMD_ZONE_WRITE_LOCK;
+
return BLKPREP_OK;
}
@@ -302,9 +305,10 @@ void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
struct request *rq = cmd->request;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- if (sdkp->zones_wlock) {
+ if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) {
unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
+ cmd->flags &= ~SCMD_ZONE_WRITE_LOCK;
clear_bit_unlock(zno, sdkp->zones_wlock);
smp_mb__after_atomic();
}
@@ -335,9 +339,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd,
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
- /* Unlock the zone */
- sd_zbc_write_unlock_zone(cmd);
-
if (result &&
sshdr->sense_key == ILLEGAL_REQUEST &&
sshdr->asc == 0x21)
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index f1cdf32d7514..8927f9f54ad9 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -99,7 +99,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
NULL, SES_TIMEOUT, SES_RETRIES, NULL);
- if (unlikely(!ret))
+ if (unlikely(ret))
return ret;
recv_page_code = ((unsigned char *)buf)[0];
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4fe606b000b4..84e782d8e7c3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -751,35 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
return count;
}
-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
-{
- switch (hp->dxfer_direction) {
- case SG_DXFER_NONE:
- if (hp->dxferp || hp->dxfer_len > 0)
- return false;
- return true;
- case SG_DXFER_FROM_DEV:
- /*
- * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp
- * can either be NULL or != NULL so there's no point in checking
- * it either. So just return true.
- */
- return true;
- case SG_DXFER_TO_DEV:
- case SG_DXFER_TO_FROM_DEV:
- if (!hp->dxferp || hp->dxfer_len == 0)
- return false;
- return true;
- case SG_DXFER_UNKNOWN:
- if ((!hp->dxferp && hp->dxfer_len) ||
- (hp->dxferp && hp->dxfer_len == 0))
- return false;
- return true;
- default:
- return false;
- }
-}
-
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
@@ -800,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (!sg_is_valid_dxfer(hp))
+ if (hp->dxfer_len >= SZ_256M)
return -EINVAL;
k = sg_start_req(srp, cmnd);
@@ -1050,7 +1021,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
read_lock_irqsave(&sfp->rq_list_lock, iflags);
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
- if (val > SG_MAX_QUEUE)
+ if (val >= SG_MAX_QUEUE)
break;
memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
rinfo[val].req_state = srp->done + 1;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 8e5013d9cad4..94e402ed30f6 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4299,11 +4299,11 @@ static int st_probe(struct device *dev)
kref_init(&tpnt->kref);
tpnt->disk = disk;
disk->private_data = &tpnt->driver;
- disk->queue = SDp->request_queue;
/* SCSI tape doesn't register this gendisk via add_disk(). Manually
* take queue reference that release_disk() expects. */
- if (!blk_get_queue(disk->queue))
+ if (!blk_get_queue(SDp->request_queue))
goto out_put_disk;
+ disk->queue = SDp->request_queue;
tpnt->driver = &st_template;
tpnt->device = SDp;
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 3039072911a5..afc7ecc3c187 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -200,16 +200,11 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev)
domain->dev = &pdev->dev;
- ret = pm_genpd_init(&domain->genpd, NULL, true);
- if (ret) {
- dev_err(domain->dev, "Failed to init power domain\n");
- return ret;
- }
-
domain->regulator = devm_regulator_get_optional(domain->dev, "power");
if (IS_ERR(domain->regulator)) {
if (PTR_ERR(domain->regulator) != -ENODEV) {
- dev_err(domain->dev, "Failed to get domain's regulator\n");
+ if (PTR_ERR(domain->regulator) != -EPROBE_DEFER)
+ dev_err(domain->dev, "Failed to get domain's regulator\n");
return PTR_ERR(domain->regulator);
}
} else {
@@ -217,6 +212,12 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev)
domain->voltage, domain->voltage);
}
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err(domain->dev, "Failed to init power domain\n");
+ return ret;
+ }
+
ret = of_genpd_add_provider_simple(domain->dev->of_node,
&domain->genpd);
if (ret) {
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 279e7c5551dd..39225de9d7f1 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -745,6 +745,9 @@ void *knav_pool_create(const char *name,
bool slot_found;
int ret;
+ if (!kdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
if (!kdev->dev)
return ERR_PTR(-ENODEV);
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index b0b283810e72..de31b9389e2e 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -176,6 +176,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
ti_sci_pd->dev = dev;
+ ti_sci_pd->pd.name = "ti_sci_pd";
+
ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev;
ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4fcbb0aa71d3..7d920ea19957 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -40,6 +40,7 @@
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
+#include <linux/platform_data/x86/apple.h>
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
@@ -1693,6 +1694,35 @@ static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
#ifdef CONFIG_ACPI
+static void acpi_spi_parse_apple_properties(struct spi_device *spi)
+{
+ struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
+ const union acpi_object *obj;
+
+ if (!x86_apple_machine)
+ return;
+
+ if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length >= 4)
+ spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
+
+ if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length == 8)
+ spi->bits_per_word = *(u64 *)obj->buffer.pointer;
+
+ if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
+ spi->mode |= SPI_LSB_FIRST;
+
+ if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
+ spi->mode |= SPI_CPOL;
+
+ if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
+ spi->mode |= SPI_CPHA;
+}
+
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
struct spi_device *spi = data;
@@ -1766,6 +1796,8 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
acpi_spi_add_resource, spi);
acpi_dev_free_resource_list(&resource_list);
+ acpi_spi_parse_apple_properties(spi);
+
if (ret < 0 || !spi->max_speed_hz) {
spi_dev_put(spi);
return AE_OK;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index ca11be21f64b..34ca7823255d 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
continue;
}
+ set_current_state(TASK_RUNNING);
wp = async->buf_write_ptr;
n1 = min(n, async->prealloc_bufsz - wp);
n2 = n - n1;
@@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
}
continue;
}
+
+ set_current_state(TASK_RUNNING);
rp = async->buf_read_ptr;
n1 = min(n, async->prealloc_bufsz - rp);
n2 = n - n1;
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index b37a6f48225f..8ea3920400a0 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -16,9 +16,9 @@
static bool __must_check fsl_mc_is_allocatable(const char *obj_type)
{
- return strcmp(obj_type, "dpbp") ||
- strcmp(obj_type, "dpmcp") ||
- strcmp(obj_type, "dpcon");
+ return strcmp(obj_type, "dpbp") == 0 ||
+ strcmp(obj_type, "dpmcp") == 0 ||
+ strcmp(obj_type, "dpcon") == 0;
}
/**
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index a6a8393d6664..3e00df74b18c 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad2s1210_state *st = iio_priv(indio_dev);
- bool negative;
+ u16 negative;
int ret = 0;
u16 pos;
s16 vel;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d283341cfe43..56cd4e5e51b2 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
};
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index e583dd8a418b..d4fa41be80f9 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
if (!cnp) {
pr_info("%s stid %d lookup failure\n", __func__, stid);
- return;
+ goto rel_skb;
}
cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
}
static void
@@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
if (!cnp) {
pr_info("%s stid %d lookup failure\n", __func__, stid);
- return;
+ goto rel_skb;
}
cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
}
static void
@@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
struct tid_info *t = lldi->tids;
csk = lookup_tid(t, tid);
- if (unlikely(!csk))
+ if (unlikely(!csk)) {
pr_err("can't find connection for tid %u.\n", tid);
- else
+ goto rel_skb;
+ } else {
cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+ }
cxgbit_put_csk(csk);
+rel_skb:
+ __kfree_skb(skb);
}
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index dda13f1af38e..514986b57c2d 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
static void
cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
- unsigned int nents)
+ unsigned int nents, u32 skip)
{
struct skb_seq_state st;
const u8 *buf;
@@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
}
consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
- buf_len, consumed);
+ buf_len, skip + consumed);
}
}
@@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
- cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+ cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
}
cmd->write_data_done += pdu_cb->dlen;
@@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
cmd->se_cmd.data_length);
if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+ u32 skip = data_offset % PAGE_SIZE;
+
sg_off = data_offset / PAGE_SIZE;
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+ sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
- cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+ cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
}
check_payload:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 74e4975dd1b1..5001261f5d69 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
return 0;
}
np->np_thread_state = ISCSI_NP_THREAD_RESET;
+ atomic_inc(&np->np_reset_count);
if (np->np_thread) {
spin_unlock_bh(&np->np_thread_lock);
@@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->data_direction = DMA_NONE;
+ kfree(cmd->text_in_ptr);
cmd->text_in_ptr = NULL;
return 0;
@@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
return text_length;
if (completed) {
- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
} else {
- hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+ hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
cmd->read_data_done += text_length;
if (cmd->targ_xfer_tag == 0xFFFFFFFF)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e9bdc8b86e7d..dc13afbd4c88 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
flush_signals(current);
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
+ return 1;
} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
spin_unlock_bh(&np->np_thread_lock);
goto exit;
@@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto exit;
} else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
iscsit_put_transport(conn->conn_transport);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 36913734c6bc..02e8a5d86658 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
mutex_lock(&tpg->acl_node_mutex);
if (acl->dynamic_node_acl)
acl->dynamic_node_acl = 0;
- list_del(&acl->acl_list);
+ list_del_init(&acl->acl_list);
mutex_unlock(&tpg->acl_node_mutex);
target_shutdown_sessions(acl);
@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
* in transport_deregister_session().
*/
list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
- list_del(&nacl->acl_list);
+ list_del_init(&nacl->acl_list);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 97fed9a298bd..836d552b0385 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
}
mutex_lock(&se_tpg->acl_node_mutex);
- list_del(&nacl->acl_list);
+ list_del_init(&nacl->acl_list);
mutex_unlock(&se_tpg->acl_node_mutex);
core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
if (se_nacl->dynamic_stop)
- list_del(&se_nacl->acl_list);
+ list_del_init(&se_nacl->acl_list);
}
mutex_unlock(&se_tpg->acl_node_mutex);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 80ee130f8253..942d094269fb 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev,
block_remaining);
to_offset = get_block_offset_user(udev, dbi,
block_remaining);
- offset = DATA_BLOCK_SIZE - block_remaining;
- to += offset;
if (*iov_cnt != 0 &&
to_offset == iov_tail(*iov)) {
@@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
(*iov)->iov_len = copy_bytes;
}
if (copy_data) {
- memcpy(to, from + sg->length - sg_remaining,
- copy_bytes);
+ offset = DATA_BLOCK_SIZE - block_remaining;
+ memcpy(to + offset,
+ from + sg->length - sg_remaining,
+ copy_bytes);
tcmu_flush_dcache_range(to, copy_bytes);
}
sg_remaining -= copy_bytes;
@@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
offset = DATA_BLOCK_SIZE - block_remaining;
- from += offset;
tcmu_flush_dcache_range(from, copy_bytes);
- memcpy(to + sg->length - sg_remaining, from,
+ memcpy(to + sg->length - sg_remaining, from + offset,
copy_bytes);
sg_remaining -= copy_bytes;
@@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
if (udev->dev_config[0])
snprintf(str + used, size - used, "/%s", udev->dev_config);
+ /* If the old string exists, free it */
+ kfree(info->name);
info->name = str;
return 0;
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 308b6e17c88a..fe2f00ceafc5 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
int res;
enum tb_port_type type;
+ /*
+	 * Some DROMs list more ports than the controller actually has,
+	 * so we skip those but allow the parser to continue.
+ */
+ if (header->index > sw->config.max_port_number) {
+ dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
+ return 0;
+ }
+
port = &sw->ports[header->index];
port->disabled = header->port_disabled;
if (port->disabled)
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index bdaac1ff00a5..53250fc057e1 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -13,9 +13,9 @@
*/
#include <linux/delay.h>
-#include <linux/dmi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -102,11 +102,6 @@ static inline u64 get_route(u32 route_hi, u32 route_lo)
return (u64)route_hi << 32 | route_lo;
}
-static inline bool is_apple(void)
-{
- return dmi_match(DMI_BOARD_VENDOR, "Apple Inc.");
-}
-
static bool icm_match(const struct tb_cfg_request *req,
const struct ctl_pkg *pkg)
{
@@ -176,7 +171,7 @@ static int icm_request(struct tb *tb, const void *request, size_t request_size,
static bool icm_fr_is_supported(struct tb *tb)
{
- return !is_apple();
+ return !x86_apple_machine;
}
static inline int icm_fr_get_switch_index(u32 port)
@@ -517,7 +512,7 @@ static bool icm_ar_is_supported(struct tb *tb)
* Starting from Alpine Ridge we can use ICM on Apple machines
* as well. We just need to reset and re-enable it first.
*/
- if (!is_apple())
+ if (!x86_apple_machine)
return true;
/*
@@ -1011,7 +1006,7 @@ static int icm_start(struct tb *tb)
* don't provide images publicly either. To be on the safe side
* prevent root switch NVM upgrade on Macs for now.
*/
- tb->root_switch->no_nvm_upgrade = is_apple();
+ tb->root_switch->no_nvm_upgrade = x86_apple_machine;
ret = tb_switch_add(tb->root_switch);
if (ret)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1b02ca0b6129..0b22ad9d68b4 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -7,7 +7,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include "tb.h"
#include "tb_regs.h"
@@ -453,7 +453,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
struct tb_cm *tcm;
struct tb *tb;
- if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+ if (!x86_apple_machine)
return NULL;
tb = tb_domain_alloc(nhi, sizeof(*tcm));
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 284749fb0f6b..a6d5164c33a9 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -69,13 +69,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
#ifdef CONFIG_UNIX98_PTYS
if (tty->driver == ptm_driver) {
mutex_lock(&devpts_mutex);
- if (tty->link->driver_data) {
- struct path *path = tty->link->driver_data;
-
- devpts_pty_kill(path->dentry);
- path_put(path);
- kfree(path);
- }
+ if (tty->link->driver_data)
+ devpts_pty_kill(tty->link->driver_data);
mutex_unlock(&devpts_mutex);
}
#endif
@@ -607,25 +602,24 @@ static inline void legacy_pty_init(void) { }
static struct cdev ptmx_cdev;
/**
- * pty_open_peer - open the peer of a pty
- * @tty: the peer of the pty being opened
+ * ptm_open_peer - open the peer of a pty
+ * @master: the open struct file of the ptmx device node
+ * @tty: the master of the pty being opened
+ * @flags: the flags for open
*
- * Open the cached dentry in tty->link, providing a safe way for userspace
- * to get the slave end of a pty (where they have the master fd and cannot
- * access or trust the mount namespace /dev/pts was mounted inside).
+ * Provide a race free way for userspace to open the slave end of a pty
+ * (where they have the master fd and cannot access or trust the mount
+ * namespace /dev/pts was mounted inside).
*/
-static struct file *pty_open_peer(struct tty_struct *tty, int flags)
-{
- if (tty->driver->subtype != PTY_TYPE_MASTER)
- return ERR_PTR(-EIO);
- return dentry_open(tty->link->driver_data, flags, current_cred());
-}
-
-static int pty_get_peer(struct tty_struct *tty, int flags)
+int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
{
int fd = -1;
- struct file *filp = NULL;
+ struct file *filp;
int retval = -EINVAL;
+ struct path path;
+
+ if (tty->driver != ptm_driver)
+ return -EIO;
fd = get_unused_fd_flags(0);
if (fd < 0) {
@@ -633,7 +627,16 @@ static int pty_get_peer(struct tty_struct *tty, int flags)
goto err;
}
- filp = pty_open_peer(tty, flags);
+ /* Compute the slave's path */
+ path.mnt = devpts_mntget(master, tty->driver_data);
+ if (IS_ERR(path.mnt)) {
+ retval = PTR_ERR(path.mnt);
+ goto err_put;
+ }
+ path.dentry = tty->link->driver_data;
+
+ filp = dentry_open(&path, flags, current_cred());
+ mntput(path.mnt);
if (IS_ERR(filp)) {
retval = PTR_ERR(filp);
goto err_put;
@@ -662,8 +665,6 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
return pty_get_pktmode(tty, (int __user *)arg);
case TIOCGPTN: /* Get PT Number */
return put_user(tty->index, (unsigned int __user *)arg);
- case TIOCGPTPEER: /* Open the other end */
- return pty_get_peer(tty, (int) arg);
case TIOCSIG: /* Send signal to other side of pty */
return pty_signal(tty, (int) arg);
}
@@ -791,7 +792,6 @@ static int ptmx_open(struct inode *inode, struct file *filp)
{
struct pts_fs_info *fsi;
struct tty_struct *tty;
- struct path *pts_path;
struct dentry *dentry;
int retval;
int index;
@@ -845,26 +845,16 @@ static int ptmx_open(struct inode *inode, struct file *filp)
retval = PTR_ERR(dentry);
goto err_release;
}
- /* We need to cache a fake path for TIOCGPTPEER. */
- pts_path = kmalloc(sizeof(struct path), GFP_KERNEL);
- if (!pts_path)
- goto err_release;
- pts_path->mnt = filp->f_path.mnt;
- pts_path->dentry = dentry;
- path_get(pts_path);
- tty->link->driver_data = pts_path;
+ tty->link->driver_data = dentry;
retval = ptm_driver->ops->open(tty, filp);
if (retval)
- goto err_path_put;
+ goto err_release;
tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
tty_unlock(tty);
return 0;
-err_path_put:
- path_put(pts_path);
- kfree(pts_path);
err_release:
tty_unlock(tty);
// This will also put-ref the fsi
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index b5def356af63..1aab3010fbfa 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
if (up->dl_write)
uart->dl_write = up->dl_write;
- if (serial8250_isa_config != NULL)
- serial8250_isa_config(0, &uart->port,
- &uart->capabilities);
+ if (uart->port.type != PORT_8250_CIR) {
+ if (serial8250_isa_config != NULL)
+ serial8250_isa_config(0, &uart->port,
+ &uart->capabilities);
+
+ ret = uart_add_one_port(&serial8250_reg,
+ &uart->port);
+ if (ret == 0)
+ ret = uart->port.line;
+ } else {
+ dev_info(uart->port.dev,
+ "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
+ uart->port.iobase,
+ (unsigned long long)uart->port.mapbase,
+ uart->port.irq);
- ret = uart_add_one_port(&serial8250_reg, &uart->port);
- if (ret == 0)
- ret = uart->port.line;
+ ret = 0;
+ }
}
mutex_unlock(&serial_mutex);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8a857bb34fbb..1888d168a41c 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = {
.fixed_options = true,
};
-/*
- * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
- * occasionally getting stuck as 1. To avoid the potential for a hang, check
- * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
- * implementations, so only do so if an affected platform is detected in
- * parse_spcr().
- */
-static bool qdf2400_e44_present = false;
-
+#ifdef CONFIG_ACPI_SPCR_TABLE
static struct vendor_data vendor_qdt_qdf2400_e44 = {
.reg_offset = pl011_std_offsets,
.fr_busy = UART011_FR_TXFE,
@@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = {
.always_enabled = true,
.fixed_options = true,
};
+#endif
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
[REG_DR] = UART01x_DR,
@@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
resource_size_t addr;
int i;
- if (strcmp(name, "qdf2400_e44") == 0) {
- pr_info_once("UART: Working around QDF2400 SoC erratum 44");
- qdf2400_e44_present = true;
- } else if (strcmp(name, "pl011") != 0) {
+ /*
+ * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
+ * have a distinct console name, so make sure we check for that.
+	 * The actual workaround for the erratum is applied in the probe
+	 * function.
+ */
+ if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
return -ENODEV;
- }
if (uart_parse_earlycon(options, &iotype, &addr, &options))
return -ENODEV;
@@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
}
uap->port.irq = ret;
- uap->reg_offset = vendor_sbsa.reg_offset;
- uap->vendor = qdf2400_e44_present ?
- &vendor_qdt_qdf2400_e44 : &vendor_sbsa;
+#ifdef CONFIG_ACPI_SPCR_TABLE
+ if (qdf2400_e44_present) {
+ dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
+ uap->vendor = &vendor_qdt_qdf2400_e44;
+ } else
+#endif
+ uap->vendor = &vendor_sbsa;
+
+ uap->reg_offset = uap->vendor->reg_offset;
uap->fifosize = 32;
- uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
+ uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
uap->port.ops = &sbsa_uart_pops;
uap->fixed_baud = baudrate;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 974b13d24401..10c4038c0e8d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2518,6 +2518,9 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TIOCSSERIAL:
tty_warn_deprecated_flags(p);
break;
+ case TIOCGPTPEER:
+ /* Special because the struct file is needed */
+ return ptm_open_peer(file, tty, (int)arg);
default:
retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg);
if (retval != -ENOIOCTLCMD)
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ab1bb3b538ac..7f277b092b5b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
/* No more submits can occur */
spin_lock_irq(&hcd_urb_list_lock);
rescan:
- list_for_each_entry (urb, &ep->urb_list, urb_list) {
+ list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
int is_in;
if (urb->unlinked)
@@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd)
}
if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
hcd = hcd->shared_hcd;
+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+ set_bit(HCD_FLAG_DEAD, &hcd->flags);
if (hcd->rh_registered) {
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6e6797d145dd..822f8c50e423 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub)
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
- int status, i;
+ int status = -ENODEV;
+ int i;
unsigned unit_load;
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4929,9 +4930,10 @@ loop:
done:
hub_port_disable(hub, port1, 1);
- if (hcd->driver->relinquish_port && !hub->hdev->parent)
- hcd->driver->relinquish_port(hcd, port1);
-
+ if (hcd->driver->relinquish_port && !hub->hdev->parent) {
+ if (status != -ENOTCONN && status != -ENODEV)
+ hcd->driver->relinquish_port(hcd, port1);
+ }
}
/* Handle physical or logical connection change events.
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3116edfcdc18..574da2b4529c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+
/* Avision AV600U */
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Optical Mouse M90/M100 */
{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6b299c7b7656..f064f1549333 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -896,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (!node) {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+ /*
+ * USB Specification 2.0 Section 5.9.2 states that: "If
+ * there is only a single transaction in the microframe,
+ * only a DATA0 data packet PID is used. If there are
+ * two transactions per microframe, DATA1 is used for
+ * the first transaction data packet and DATA0 is used
+ * for the second transaction data packet. If there are
+ * three transactions per microframe, DATA2 is used for
+ * the first transaction data packet, DATA1 is used for
+ * the second, and DATA0 is used for the third."
+ *
+ * IOW, we should satisfy the following cases:
+ *
+ * 1) length <= maxpacket
+ * - DATA0
+ *
+ * 2) maxpacket < length <= (2 * maxpacket)
+ * - DATA1, DATA0
+ *
+ * 3) (2 * maxpacket) < length <= (3 * maxpacket)
+ * - DATA2, DATA1, DATA0
+ */
if (speed == USB_SPEED_HIGH) {
struct usb_ep *ep = &dep->endpoint;
- trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+ unsigned int mult = ep->mult - 1;
+ unsigned int maxp = usb_endpoint_maxp(ep->desc);
+
+ if (length <= (2 * maxp))
+ mult--;
+
+ if (length <= maxp)
+ mult--;
+
+ trb->size |= DWC3_TRB_SIZE_PCM1(mult);
}
} else {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 62dc9c7798e7..e1de8fe599a3 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -838,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
return usb3_req;
}
-static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
- struct renesas_usb3_request *usb3_req, int status)
+static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req,
+ int status)
{
struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
- unsigned long flags;
dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
status);
usb3_req->req.status = status;
- spin_lock_irqsave(&usb3->lock, flags);
usb3_ep->started = false;
list_del_init(&usb3_req->queue);
- spin_unlock_irqrestore(&usb3->lock, flags);
+ spin_unlock(&usb3->lock);
usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
+ spin_lock(&usb3->lock);
+}
+
+static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req, int status)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ __usb3_request_done(usb3_ep, usb3_req, status);
+ spin_unlock_irqrestore(&usb3->lock, flags);
}
static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index c8989c62a262..c8f38649f749 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -98,6 +98,7 @@ enum amd_chipset_gen {
AMD_CHIPSET_HUDSON2,
AMD_CHIPSET_BOLTON,
AMD_CHIPSET_YANGTZE,
+ AMD_CHIPSET_TAISHAN,
AMD_CHIPSET_UNKNOWN,
};
@@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+ }
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x145c, NULL);
+ if (pinfo->smbus_dev) {
+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
@@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
/* Make sure amd chipset type has already been initialized */
usb_amd_find_chipset_info();
- if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
- return 0;
-
- dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
- return 1;
+ if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+ amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
+ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+ return 1;
+ }
+ return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
@@ -1150,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
+
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
+{
+ /*
+ * Our dear uPD72020{1,2} friend only partially resets when
+ * asked to via the XHCI interface, and may end up doing DMA
+ * at the wrong addresses, as it keeps the top 32bit of some
+ * addresses from its previous programming under obscure
+ * circumstances.
+	 * Give it a good whack at probe time. Unfortunately, this
+ * needs to happen before we've had a chance to discover any
+ * quirk, or the system will be in a rather bad state.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ (pdev->device == 0x0014 || pdev->device == 0x0015))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 655994480198..5582cbafecd4 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
#else
struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5b0fa553c8bc..8071c8fdd15e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
driver = (struct hc_driver *)id->driver_data;
+	/* For some HW implementations, an XHCI reset is just not enough... */
+ if (usb_xhci_needs_pci_reset(dev)) {
+ dev_info(&dev->dev, "Resetting\n");
+ if (pci_reset_function_locked(dev))
+ dev_warn(&dev->dev, "Reset failed");
+ }
+
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 76decb8011eb..3344ffd5bb13 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
"Could not flush host TX%d fifo: csr: %04x\n",
ep->epnum, csr))
return;
+ mdelay(1);
}
}
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8fb86a5f458e..3d0dd2f97415 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -197,6 +197,7 @@ struct msm_otg {
struct regulator *v3p3;
struct regulator *v1p8;
struct regulator *vddcx;
+ struct regulator_bulk_data supplies[3];
struct reset_control *phy_rst;
struct reset_control *link_rst;
@@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
static int msm_otg_probe(struct platform_device *pdev)
{
- struct regulator_bulk_data regs[3];
int ret = 0;
struct device_node *np = pdev->dev.of_node;
struct msm_otg_platform_data *pdata;
@@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev)
return motg->irq;
}
- regs[0].supply = "vddcx";
- regs[1].supply = "v3p3";
- regs[2].supply = "v1p8";
+ motg->supplies[0].supply = "vddcx";
+ motg->supplies[1].supply = "v3p3";
+ motg->supplies[2].supply = "v1p8";
- ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
+ ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
+ motg->supplies);
if (ret)
return ret;
- motg->vddcx = regs[0].consumer;
- motg->v3p3 = regs[1].consumer;
- motg->v1p8 = regs[2].consumer;
+ motg->vddcx = motg->supplies[0].consumer;
+ motg->v3p3 = motg->supplies[1].consumer;
+ motg->v1p8 = motg->supplies[2].consumer;
clk_set_rate(motg->clk, 60000000);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 93fba9033b00..2c8161bcf5b5 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhs_pipe *pipe;
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(&uep->lock, flags);
pipe = usbhsg_uep_to_pipe(uep);
- if (!pipe) {
- ret = -EINVAL;
+ if (!pipe)
goto out;
- }
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d544b331c9f2..02b67abfc2a1 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -20,9 +20,13 @@
/* Low Power Status register (LPSTS) */
#define LPSTS_SUSPM 0x4000
-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
+/*
+ * USB General control register 2 (UGCTRL2)
+ * Remarks: bit[31:11] and bit[9:6] should be 0
+ */
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
#define UGCTRL2_USB0SEL_OTG 0x00000030
+#define UGCTRL2_VBUSSEL 0x00000400
static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
{
@@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+ usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
+ UGCTRL2_VBUSSEL);
if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f64e914a8985..2d945c9f975c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ebe51f11105d..fe123153b1a5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c9ebefd8f35f..a585b477415d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
+ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09d9be88209e..3b5a15d1dc0d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
+#define ATEN_PRODUCT_UC485 0x2021
#define ATEN_PRODUCT_ID2 0x2118
#define IODATA_VENDOR_ID 0x04bb
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cbea9f329e71..cde115359793 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
- "",
+ "INIC-3069",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
+ US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 06615934fed1..0dceb9fa3a06 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
struct Scsi_Host *host = us_to_host(us);
+ struct scsi_cmnd *srb;
for (;;) {
usb_stor_dbg(us, "*** thread sleeping\n");
@@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us)
scsi_lock(host);
/* When we are called with no command pending, we're done */
+ srb = us->srb;
if (us->srb == NULL) {
scsi_unlock(host);
mutex_unlock(&us->dev_mutex);
@@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us)
/* lock access to the state */
scsi_lock(host);
- /* indicate that the command is done */
- if (us->srb->result != DID_ABORT << 16) {
- usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
- us->srb->result);
- us->srb->scsi_done(us->srb);
- } else {
+ /* was the command aborted? */
+ if (us->srb->result == DID_ABORT << 16) {
SkipForAbort:
usb_stor_dbg(us, "scsi command aborted\n");
+ srb = NULL; /* Don't call srb->scsi_done() */
}
/*
@@ -429,6 +428,13 @@ SkipForAbort:
/* unlock the device pointers */
mutex_unlock(&us->dev_mutex);
+
+ /* now that the locks are released, notify the SCSI core */
+ if (srb) {
+ usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
+ srb->result);
+ srb->scsi_done(srb);
+ }
} /* for (;;) */
/* Wait until we are told to stop */
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ff01bed7112f..1e784adb89b1 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -17,6 +17,7 @@
#include <asm/efi.h>
static bool request_mem_succeeded = false;
+static bool nowc = false;
static struct fb_var_screeninfo efifb_defined = {
.activate = FB_ACTIVATE_NOW,
@@ -99,6 +100,8 @@ static int efifb_setup(char *options)
screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
else if (!strncmp(this_opt, "width:", 6))
screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+ else if (!strcmp(this_opt, "nowc"))
+ nowc = true;
}
}
@@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+ if (nowc)
+ info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+ else
+ info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
efifb_fix.smem_len, efifb_fix.smem_start);
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index c166e0725be5..ba82f97fb42b 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev)
imxfb_disable_controller(fbi);
unregister_framebuffer(info);
-
+ fb_dealloc_cmap(&info->cmap);
pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
-
- fb_dealloc_cmap(&info->cmap);
- kfree(info->pseudo_palette);
- framebuffer_release(info);
-
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
fbi->map_dma);
-
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
+ kfree(info->pseudo_palette);
+ framebuffer_release(info);
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index eecf695c16f4..09e5bb013d28 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = {
static int __init omap_dss_probe(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
int r;
core.pdev = pdev;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 007a4f366086..1c4797e53f68 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
+ unsigned flags = PCI_IRQ_MSIX;
unsigned i, v;
int err = -ENOMEM;
@@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL))
goto error;
+ if (desc) {
+ flags |= PCI_IRQ_AFFINITY;
+ desc->pre_vectors++; /* virtio config vector */
+ }
+
err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
- nvectors, PCI_IRQ_MSIX |
- (desc ? PCI_IRQ_AFFINITY : 0),
- desc);
+ nvectors, flags, desc);
if (err < 0)
goto error;
vp_dev->msix_enabled = 1;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 8feab810aed9..7f188b8d0c67 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -7,9 +7,6 @@ obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_features.o := $(nostackp)
-CFLAGS_efi.o += -fshort-wchar
-LDFLAGS += $(call ld-option, --no-wchar-size-warning)
-
dom0-$(CONFIG_ARM64) += arm-device.o
dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 4da69dbf7dca..1bdd02a6d6ac 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
- return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
- ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
+ return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
#else
/*
* XXX: Add support for merging bio_vec when using different page
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index bae1f5d36c26..2d43118077e4 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -574,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data)
static void enable_pirq(struct irq_data *data)
{
- startup_pirq(data);
+ enable_dynirq(data);
}
static void disable_pirq(struct irq_data *data)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index f3bf8f4e2d6c..82360594fa8e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -484,13 +484,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
mutex_unlock(&priv->lock);
}
-static void mn_invl_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
-}
-
static void mn_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
@@ -522,7 +515,6 @@ static void mn_release(struct mmu_notifier *mn,
static const struct mmu_notifier_ops gntdev_mmu_ops = {
.release = mn_release,
- .invalidate_page = mn_invl_page,
.invalidate_range_start = mn_invl_range_start,
};
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index e46080214955..3e59590c7254 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused)
struct list_head *ent;
struct xs_watch_event *event;
+ xenwatch_pid = current->pid;
+
for (;;) {
wait_event_interruptible(watch_events_waitq,
!list_empty(&watch_events));
@@ -925,7 +927,6 @@ int xs_init(void)
task = kthread_run(xenwatch_thread, NULL, "xenwatch");
if (IS_ERR(task))
return PTR_ERR(task);
- xenwatch_pid = task->pid;
/* shutdown watches for kexec boot */
xs_reset_watches();