-rw-r--r--  Documentation/ABI/testing/sysfs-driver-ufs | 236
-rw-r--r--  Documentation/ABI/testing/sysfs-ptp | 20
-rw-r--r--  Documentation/dev-tools/kunit/running_tips.rst | 14
-rw-r--r--  Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/display/renesas,du.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/hwmon/adt7475.yaml | 22
-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml | 8
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/net/gpmc-eth.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/smsc,lan9115.yaml | 110
-rw-r--r--  Documentation/devicetree/bindings/net/smsc911x.txt | 43
-rw-r--r--  Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml | 56
-rw-r--r--  Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-controller.yaml | 32
-rw-r--r--  Documentation/devicetree/bindings/usb/nxp,isp1760.yaml | 2
-rw-r--r--  Documentation/driver-api/early-userspace/early_userspace_support.rst | 8
-rw-r--r--  Documentation/features/core/thread-info-in-task/arch-support.txt | 32
-rw-r--r--  Documentation/features/time/arch-tick-broadcast/arch-support.txt | 2
-rw-r--r--  Documentation/filesystems/ramfs-rootfs-initramfs.rst | 2
-rw-r--r--  Documentation/networking/ethtool-netlink.rst | 22
-rw-r--r--  Documentation/networking/nf_conntrack-sysctl.rst | 6
-rw-r--r--  Documentation/networking/tipc.rst | 121
-rw-r--r--  Documentation/translations/zh_CN/process/2.Process.rst | 4
-rw-r--r--  LICENSES/dual/CC-BY-4.0 | 2
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  Makefile | 13
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts | 4
-rw-r--r--  arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts | 9
-rw-r--r--  arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts | 5
-rw-r--r--  arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts | 6
-rw-r--r--  arch/arm/boot/dts/qcom-apq8060-dragonboard.dts | 4
-rw-r--r--  arch/arm/boot/dts/versatile-ab.dts | 5
-rw-r--r--  arch/arm/boot/dts/versatile-pb.dts | 2
-rw-r--r--  arch/arm/configs/integrator_defconfig | 5
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 2
-rw-r--r--  arch/arm/configs/realview_defconfig | 4
-rw-r--r--  arch/arm/configs/shmobile_defconfig | 1
-rw-r--r--  arch/arm/configs/u8500_defconfig | 5
-rw-r--r--  arch/arm/configs/versatile_defconfig | 4
-rw-r--r--  arch/arm/configs/vexpress_defconfig | 17
-rw-r--r--  arch/arm64/Kconfig | 3
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra194.dtsi | 13
-rw-r--r--  arch/arm64/boot/dts/renesas/r9a07g044.dtsi | 4
-rw-r--r--  arch/arm64/include/asm/cache.h | 2
-rw-r--r--  arch/arm64/include/asm/smp_plat.h | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 2
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 1
-rw-r--r--  arch/arm64/kernel/entry-common.c | 2
-rw-r--r--  arch/arm64/kernel/mte.c | 15
-rw-r--r--  arch/arm64/lib/copy_from_user.S | 13
-rw-r--r--  arch/arm64/lib/copy_in_user.S | 21
-rw-r--r--  arch/arm64/lib/copy_to_user.S | 14
-rw-r--r--  arch/arm64/lib/strlen.S | 10
-rw-r--r--  arch/mips/include/asm/fpu.h | 2
-rw-r--r--  arch/mips/mm/tlbex.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 1
-rw-r--r--  arch/s390/kernel/uprobes.c | 1
-rw-r--r--  arch/x86/kvm/cpuid.c | 30
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 2
-rw-r--r--  arch/x86/kvm/mmu/paging.h | 14
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h | 4
-rw-r--r--  arch/x86/kvm/mmu/spte.h | 6
-rw-r--r--  arch/x86/kvm/svm/nested.c | 53
-rw-r--r--  arch/x86/kvm/svm/sev.c | 14
-rw-r--r--  arch/x86/kvm/svm/svm.c | 77
-rw-r--r--  arch/x86/kvm/svm/svm.h | 5
-rw-r--r--  arch/x86/kvm/vmx/vmx.h | 2
-rw-r--r--  arch/x86/kvm/x86.c | 5
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 3
-rw-r--r--  block/Kconfig | 26
-rw-r--r--  block/Makefile | 3
-rw-r--r--  block/blk-mq.c | 2
-rw-r--r--  block/bsg-lib.c | 90
-rw-r--r--  block/bsg.c | 463
-rw-r--r--  block/scsi_ioctl.c | 890
-rw-r--r--  drivers/ata/libata-eh.c | 5
-rw-r--r--  drivers/ata/libata-scsi.c | 10
-rw-r--r--  drivers/ata/pata_falcon.c | 4
-rw-r--r--  drivers/base/core.c | 2
-rw-r--r--  drivers/block/Kconfig | 3
-rw-r--r--  drivers/block/nbd.c | 2
-rw-r--r--  drivers/block/paride/Kconfig | 1
-rw-r--r--  drivers/block/paride/pd.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 224
-rw-r--r--  drivers/cdrom/cdrom.c | 78
-rw-r--r--  drivers/char/powernv-op-panel.c | 1
-rw-r--r--  drivers/clk/renesas/r9a07g044-cpg.c | 79
-rw-r--r--  drivers/clk/renesas/renesas-rzg2l-cpg.c | 110
-rw-r--r--  drivers/clk/renesas/renesas-rzg2l-cpg.h | 37
-rw-r--r--  drivers/cpufreq/longhaul.c | 2
-rw-r--r--  drivers/dma-buf/sync_file.c | 13
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 2
-rw-r--r--  drivers/dma/mpc512x_dma.c | 1
-rw-r--r--  drivers/dma/ti/k3-udma.c | 1
-rw-r--r--  drivers/edac/Kconfig | 2
-rw-r--r--  drivers/firmware/arm_ffa/bus.c | 6
-rw-r--r--  drivers/firmware/arm_ffa/driver.c | 8
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 8
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 14
-rw-r--r--  drivers/firmware/arm_scmi/notify.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h | 355
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h | 531
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v13_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h | 57
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c | 311
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 49
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt35510.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_range_manager.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 9
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 1
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c | 13
-rw-r--r--  drivers/iommu/intel/iommu.c | 34
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 6
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c | 2
-rw-r--r--  drivers/mtd/chips/cfi_util.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 181
-rw-r--r--  drivers/net/caif/Kconfig | 9
-rw-r--r--  drivers/net/caif/Makefile | 3
-rw-r--r--  drivers/net/caif/caif_hsi.c | 1454
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 22
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 6
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 14
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 3
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 19
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c | 7
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 22
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 1
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ipsec.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 292
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 58
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 111
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 200
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 88
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 87
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 173
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 229
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 12
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 14
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 41
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 56
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 3
-rw-r--r--  drivers/net/fddi/defza.c | 3
-rw-r--r--  drivers/net/netdevsim/ipsec.c | 8
-rw-r--r--  drivers/net/phy/marvell10g.c | 40
-rw-r--r--  drivers/net/usb/asix_devices.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 22
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 8
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 8
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 8
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 8
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 8
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/main.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 3
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem_ops.c | 21
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem_ops.h | 6
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux_codec.c | 2
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_uevent.c | 2
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_wwan.c | 11
-rw-r--r--  drivers/nvme/host/pci.c | 67
-rw-r--r--  drivers/nvme/host/tcp.c | 4
-rw-r--r--  drivers/pci/proc.c | 2
-rw-r--r--  drivers/power/supply/ab8500_fg.c | 2
-rw-r--r--  drivers/power/supply/abx500_chargalg.c | 1
-rw-r--r--  drivers/ptp/Makefile | 2
-rw-r--r--  drivers/ptp/ptp_clock.c | 44
-rw-r--r--  drivers/ptp/ptp_private.h | 39
-rw-r--r--  drivers/ptp/ptp_sysfs.c | 160
-rw-r--r--  drivers/ptp/ptp_vclock.c | 219
-rw-r--r--  drivers/pwm/pwm-berlin.c | 9
-rw-r--r--  drivers/pwm/pwm-ep93xx.c | 85
-rw-r--r--  drivers/pwm/pwm-spear.c | 9
-rw-r--r--  drivers/pwm/pwm-sprd.c | 11
-rw-r--r--  drivers/pwm/pwm-tiecap.c | 15
-rw-r--r--  drivers/s390/char/tape_char.c | 2
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 1
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 1
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 6
-rw-r--r--  drivers/scsi/53c700.c | 2
-rw-r--r--  drivers/scsi/BusLogic.c | 8
-rw-r--r--  drivers/scsi/Kconfig | 18
-rw-r--r--  drivers/scsi/Makefile | 3
-rw-r--r--  drivers/scsi/NCR5380.c | 6
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 3
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/advansys.c | 4
-rw-r--r--  drivers/scsi/aha1542.c | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 84
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r--  drivers/scsi/ch.c | 73
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 6
-rw-r--r--  drivers/scsi/cxlflash/main.c | 2
-rw-r--r--  drivers/scsi/dpt_i2o.c | 4
-rw-r--r--  drivers/scsi/elx/efct/efct_lio.c | 2
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 51
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 4
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 2
-rw-r--r--  drivers/scsi/hpsa.c | 6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/ips.c | 2
-rw-r--r--  drivers/scsi/isci/request.c | 4
-rw-r--r--  drivers/scsi/libsas/Kconfig | 1
-rw-r--r--  drivers/scsi/libsas/Makefile | 2
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 6
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 128
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 198
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 26
-rw-r--r--  drivers/scsi/lpfc/lpfc_ids.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 93
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 43
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 147
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 194
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 10
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 8
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 317
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 40
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 37
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 24
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 119
-rw-r--r--  drivers/scsi/mvumi.c | 2
-rw-r--r--  drivers/scsi/myrb.c | 11
-rw-r--r--  drivers/scsi/myrs.c | 11
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 4
-rw-r--r--  drivers/scsi/pcmcia/fdomain_cs.c | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 8
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 9
-rw-r--r--  drivers/scsi/qla1280.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/Makefile | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 90
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 195
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif.c | 3409
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif.h | 128
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif_bsg.h | 220
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 50
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 168
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 146
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 320
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 33
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 105
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 145
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 19
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 30
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 10
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 14
-rw-r--r--  drivers/scsi/qlogicpti.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 2
-rw-r--r--  drivers/scsi/scsi_bsg.c | 106
-rw-r--r--  drivers/scsi/scsi_common.c | 9
-rw-r--r--  drivers/scsi/scsi_debug.c | 125
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_error.c | 16
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 851
-rw-r--r--  drivers/scsi/scsi_lib.c | 36
-rw-r--r--  drivers/scsi/scsi_logging.c | 18
-rw-r--r--  drivers/scsi/scsi_priv.h | 3
-rw-r--r--  drivers/scsi/scsi_scan.c | 5
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 24
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 2
-rw-r--r--  drivers/scsi/sd.c | 125
-rw-r--r--  drivers/scsi/sd_zbc.c | 10
-rw-r--r--  drivers/scsi/sg.c | 33
-rw-r--r--  drivers/scsi/smartpqi/Kconfig | 8
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 6
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 68
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 4
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 4
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.h | 4
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 12
-rw-r--r--  drivers/scsi/sr.c | 143
-rw-r--r--  drivers/scsi/st.c | 72
-rw-r--r--  drivers/scsi/stex.c | 6
-rw-r--r--  drivers/scsi/storvsc_drv.c | 4
-rw-r--r--  drivers/scsi/sun3_scsi.c | 5
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r--  drivers/scsi/ufs/Kconfig | 16
-rw-r--r--  drivers/scsi/ufs/Makefile | 2
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c | 7
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210-pci.c | 32
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210-pltfrm.c | 7
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.c | 7
-rw-r--r--  drivers/scsi/ufs/ufs-fault-injection.c | 70
-rw-r--r--  drivers/scsi/ufs/ufs-fault-injection.h | 24
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.c | 7
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c | 7
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 7
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 25
-rw-r--r--  drivers/scsi/ufs/ufs.h | 54
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h | 6
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 48
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 47
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.h | 18
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 612
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 107
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 1
-rw-r--r--  drivers/scsi/ufs/ufshpb.c | 2933
-rw-r--r--  drivers/scsi/ufs/ufshpb.h | 323
-rw-r--r--  drivers/scsi/virtio_scsi.c | 4
-rw-r--r--  drivers/scsi/xen-scsifront.c | 2
-rw-r--r--  drivers/target/Kconfig | 2
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 2
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 8
-rw-r--r--  drivers/target/sbp/sbp_target.c | 4
-rw-r--r--  drivers/target/target_core_alua.c | 94
-rw-r--r--  drivers/target/target_core_iblock.c | 2
-rw-r--r--  drivers/target/target_core_transport.c | 33
-rw-r--r--  drivers/target/target_core_user.c | 150
-rw-r--r--  drivers/usb/gadget/udc/fsl_qe_udc.c | 1
-rw-r--r--  drivers/usb/storage/transport.c | 2
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 12
-rw-r--r--  drivers/video/fbdev/xilinxfb.c | 2
-rw-r--r--  fs/btrfs/block-group.c | 367
-rw-r--r--  fs/btrfs/block-group.h | 6
-rw-r--r--  fs/btrfs/ctree.c | 67
-rw-r--r--  fs/btrfs/inode.c | 147
-rw-r--r--  fs/btrfs/transaction.c | 15
-rw-r--r--  fs/btrfs/transaction.h | 9
-rw-r--r--  fs/btrfs/tree-log.c | 2
-rw-r--r--  fs/btrfs/volumes.c | 355
-rw-r--r--  fs/btrfs/volumes.h | 5
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 6
-rw-r--r--  fs/cifs/cifsglob.h | 7
-rw-r--r--  fs/cifs/connect.c | 110
-rw-r--r--  fs/cifs/dns_resolve.c | 10
-rw-r--r--  fs/cifs/dns_resolve.h | 2
-rw-r--r--  fs/cifs/misc.c | 2
-rw-r--r--  fs/cifs/smb2ops.c | 6
-rw-r--r--  fs/cifs/smb2pdu.h | 1
-rw-r--r--  fs/configfs/file.c | 29
-rw-r--r--  fs/fcntl.c | 2
-rw-r--r--  fs/fs_context.c | 54
-rw-r--r--  fs/hfs/bfind.c | 14
-rw-r--r--  fs/hfs/bnode.c | 25
-rw-r--r--  fs/hfs/btree.h | 7
-rw-r--r--  fs/hfs/super.c | 10
-rw-r--r--  fs/io_uring.c | 8
-rw-r--r--  fs/iomap/buffered-io.c | 8
-rw-r--r--  fs/iomap/seek.c | 25
-rw-r--r--  fs/nfsd/Kconfig | 2
-rw-r--r--  fs/vboxsf/dir.c | 76
-rw-r--r--  fs/vboxsf/file.c | 71
-rw-r--r--  fs/vboxsf/vfsmod.h | 7
-rw-r--r--  fs/xfs/libxfs/xfs_ag.c | 8
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 16
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c | 55
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 28
-rw-r--r--  fs/xfs/libxfs/xfs_trans_inode.c | 10
-rw-r--r--  fs/xfs/scrub/inode.c | 18
-rw-r--r--  fs/xfs/xfs_inode.c | 13
-rw-r--r--  fs/xfs/xfs_ioctl.c | 27
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 49
-rw-r--r--  fs/zonefs/super.c | 3
-rw-r--r--  include/dt-bindings/clock/r9a07g044-cpg.h | 236
-rw-r--r--  include/linux/blkdev.h | 33
-rw-r--r--  include/linux/bpf.h | 1
-rw-r--r--  include/linux/bsg-lib.h | 1
-rw-r--r--  include/linux/bsg.h | 38
-rw-r--r--  include/linux/cdrom.h | 6
-rw-r--r--  include/linux/ethtool.h | 10
-rw-r--r--  include/linux/fs_context.h | 2
-rw-r--r--  include/linux/kasan.h | 1
-rw-r--r--  include/linux/marvell_phy.h | 6
-rw-r--r--  include/linux/migrate.h | 5
-rw-r--r--  include/linux/mm.h | 1
-rw-r--r--  include/linux/ptp_clock_kernel.h | 31
-rw-r--r--  include/linux/rmap.h | 4
-rw-r--r--  include/linux/scmi_protocol.h | 14
-rw-r--r--  include/linux/scpi_protocol.h | 8
-rw-r--r--  include/linux/stmmac.h | 2
-rw-r--r--  include/math-emu/op-common.h | 2
-rw-r--r--  include/net/bonding.h | 9
-rw-r--r--  include/net/busy_poll.h | 2
-rw-r--r--  include/net/caif/caif_hsi.h | 200
-rw-r--r--  include/net/dst_metadata.h | 4
-rw-r--r--  include/net/ip6_route.h | 2
-rw-r--r--  include/net/mptcp.h | 5
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h | 1
-rw-r--r--  include/net/netns/conntrack.h | 1
-rw-r--r--  include/net/sctp/constants.h | 4
-rw-r--r--  include/net/sock.h | 8
-rw-r--r--  include/net/tcp.h | 4
-rw-r--r--  include/scsi/scsi_cmnd.h | 38
-rw-r--r--  include/scsi/scsi_device.h | 22
-rw-r--r--  include/scsi/scsi_devinfo.h | 6
-rw-r--r--  include/scsi/scsi_ioctl.h | 9
-rw-r--r--  include/scsi/scsi_request.h | 2
-rw-r--r--  include/soc/tegra/mc.h | 9
-rw-r--r--  include/target/target_core_base.h | 8
-rw-r--r--  include/uapi/linux/ethtool_netlink.h | 15
-rw-r--r--  include/uapi/linux/net_tstamp.h | 17
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_log.h | 2
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_queue.h | 4
-rw-r--r--  include/uapi/linux/target_core_user.h | 2
-rw-r--r--  init/Kconfig | 1
-rw-r--r--  kernel/bpf/core.c | 8
-rw-r--r--  kernel/bpf/devmap.c | 6
-rw-r--r--  kernel/bpf/verifier.c | 60
-rw-r--r--  kernel/cgroup/cgroup-v1.c | 12
-rw-r--r--  kernel/debug/gdbstub.c | 2
-rw-r--r--  kernel/rcu/refscale.c | 6
-rw-r--r--  kernel/rcu/tasks.h | 6
-rw-r--r--  kernel/rcu/tree_stall.h | 4
-rw-r--r--  kernel/scftorture.c | 6
-rw-r--r--  kernel/trace/trace_events_hist.c | 6
-rw-r--r--  lib/test_hmm.c | 2
-rw-r--r--  mm/hugetlb.c | 5
-rw-r--r--  mm/kasan/kasan.h | 12
-rw-r--r--  mm/migrate.c | 48
-rw-r--r--  mm/page_alloc.c | 28
-rw-r--r--  mm/rmap.c | 39
-rw-r--r--  mm/slab.h | 15
-rw-r--r--  mm/slub.c | 93
-rw-r--r--  mm/util.c | 10
-rw-r--r--  net/802/garp.c | 14
-rw-r--r--  net/802/mrp.c | 14
-rw-r--r--  net/bridge/br_if.c | 17
-rw-r--r--  net/bridge/br_multicast.c | 6
-rw-r--r--  net/core/dev.c | 16
-rw-r--r--  net/core/skbuff.c | 1
-rw-r--r--  net/core/sock.c | 71
-rw-r--r--  net/dsa/switch.c | 8
-rw-r--r--  net/ethtool/Makefile | 2
-rw-r--r--  net/ethtool/common.c | 14
-rw-r--r--  net/ethtool/netlink.c | 10
-rw-r--r--  net/ethtool/netlink.h | 2
-rw-r--r--  net/ethtool/phc_vclocks.c | 94
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/inet_diag.c | 5
-rw-r--r--  net/ipv4/ip_tunnel.c | 18
-rw-r--r--  net/ipv4/ipmr.c | 2
-rw-r--r--  net/ipv4/raw_diag.c | 7
-rw-r--r--  net/ipv4/tcp.c | 3
-rw-r--r--  net/ipv4/tcp_input.c | 21
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv4/tcp_output.c | 1
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv4/udp_diag.c | 6
-rw-r--r--  net/ipv4/udp_offload.c | 6
-rw-r--r--  net/ipv6/ip6_output.c | 32
-rw-r--r--  net/ipv6/tcp_ipv6.c | 21
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/iucv/iucv.c | 22
-rw-r--r--  net/mptcp/mib.c | 1
-rw-r--r--  net/mptcp/mib.h | 1
-rw-r--r--  net/mptcp/mptcp_diag.c | 6
-rw-r--r--  net/mptcp/options.c | 19
-rw-r--r--  net/mptcp/protocol.c | 12
-rw-r--r--  net/mptcp/protocol.h | 10
-rw-r--r--  net/mptcp/sockopt.c | 68
-rw-r--r--  net/mptcp/subflow.c | 11
-rw-r--r--  net/mptcp/syncookies.c | 16
-rw-r--r--  net/ncsi/Kconfig | 6
-rw-r--r--  net/ncsi/internal.h | 5
-rw-r--r--  net/ncsi/ncsi-manage.c | 51
-rw-r--r--  net/ncsi/ncsi-rsp.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_proto.c | 7
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 13
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 69
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 10
-rw-r--r--  net/netfilter/nf_tables_api.c | 3
-rw-r--r--  net/netfilter/nft_last.c | 12
-rw-r--r--  net/netlink/af_netlink.c | 2
-rw-r--r--  net/openvswitch/flow_table.c | 6
-rw-r--r--  net/sched/act_ct.c | 14
-rw-r--r--  net/sched/sch_taprio.c | 2
-rw-r--r--  net/sctp/diag.c | 6
-rw-r--r--  net/sctp/protocol.c | 3
-rw-r--r--  net/sctp/sm_make_chunk.c | 2
-rw-r--r--  net/sctp/transport.c | 11
-rw-r--r--  net/socket.c | 19
-rw-r--r--  net/unix/diag.c | 6
-rw-r--r--  samples/bpf/Makefile | 1
-rw-r--r--  samples/bpf/xdpsock_user.c | 28
-rw-r--r--  scripts/Makefile.build | 2
-rwxr-xr-x  scripts/setlocalversion | 13
-rwxr-xr-x  scripts/spdxcheck.py | 2
-rw-r--r--  sound/soc/mediatek/mt8183/mt8183-dai-adda.c | 1
-rw-r--r--  tools/arch/arm64/include/uapi/asm/unistd.h | 1
-rw-r--r--  tools/bpf/Makefile | 7
-rw-r--r--  tools/bpf/bpftool/jit_disasm.c | 6
-rw-r--r--  tools/bpf/runqslower/runqslower.bpf.c | 2
-rw-r--r--  tools/include/linux/kconfig.h | 6
-rw-r--r--  tools/include/uapi/asm-generic/unistd.h | 7
-rw-r--r--  tools/lib/bpf/libbpf.c | 4
-rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 1
-rw-r--r--  tools/perf/builtin-inject.c | 13
-rw-r--r--  tools/perf/builtin-report.c | 33
-rw-r--r--  tools/perf/builtin-sched.c | 35
-rw-r--r--  tools/perf/builtin-script.c | 8
-rw-r--r--  tools/perf/builtin-stat.c | 3
-rw-r--r--  tools/perf/builtin-trace.c | 45
-rw-r--r--  tools/perf/tests/bpf.c | 2
-rw-r--r--  tools/perf/tests/event_update.c | 6
-rw-r--r--  tools/perf/tests/evsel-roundtrip-name.c | 3
-rw-r--r--  tools/perf/tests/maps.c | 2
-rw-r--r--  tools/perf/tests/parse-events.c | 16
-rw-r--r--  tools/perf/tests/perf-time-to-tsc.c | 3
-rw-r--r--  tools/perf/tests/topology.c | 1
-rw-r--r--  tools/perf/util/cs-etm.c | 168
-rw-r--r--  tools/perf/util/data.c | 2
-rw-r--r--  tools/perf/util/dso.c | 4
-rw-r--r--  tools/perf/util/dwarf-aux.c | 8
-rw-r--r--  tools/perf/util/dwarf-aux.h | 2
-rw-r--r--  tools/perf/util/env.c | 2
-rw-r--r--  tools/perf/util/lzma.c | 8
-rw-r--r--  tools/perf/util/map.c | 2
-rw-r--r--  tools/perf/util/pfm.c | 2
-rw-r--r--  tools/perf/util/pmu.c | 9
-rw-r--r--  tools/perf/util/probe-event.c | 53
-rw-r--r--  tools/perf/util/probe-event.h | 4
-rw-r--r--  tools/perf/util/probe-file.c | 4
-rw-r--r--  tools/perf/util/probe-finder.c | 15
-rw-r--r--  tools/perf/util/probe-finder.h | 2
-rw-r--r--  tools/perf/util/session.c | 1
-rw-r--r--  tools/perf/util/sort.c | 2
-rw-r--r--  tools/perf/util/sort.h | 2
-rw-r--r--  tools/perf/util/stat-display.c | 14
-rwxr-xr-x  tools/testing/kunit/kunit.py | 2
-rw-r--r--  tools/testing/kunit/kunit_kernel.py | 6
-rw-r--r--  tools/testing/kunit/kunit_parser.py | 6
-rwxr-xr-x  tools/testing/kunit/kunit_tool_test.py | 16
-rw-r--r--  tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log (renamed from tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log) | 0
-rw-r--r--  tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tailcalls.c | 36
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c | 18
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h | 3
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/processor.c | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/guest_modes.c | 16
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 5
-rw-r--r--  tools/testing/selftests/kvm/set_memory_region_test.c | 3
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_features.c | 2
-rw-r--r--  tools/testing/selftests/kvm/x86_64/mmu_role_test.c | 2
-rw-r--r--  tools/testing/selftests/kvm/x86_64/smm_test.c | 70
-rwxr-xr-x  tools/testing/selftests/memory-hotplug/mem-on-off-test.sh | 4
-rwxr-xr-x  tools/testing/selftests/net/icmp_redirect.sh | 5
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 2
-rw-r--r--  tools/testing/selftests/net/timestamping.c | 55
-rw-r--r--  tools/testing/selftests/netfilter/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh | 167
-rw-r--r--  virt/kvm/coalesced_mmio.c | 2
-rw-r--r--  virt/kvm/kvm_main.c | 2
655 files changed, 18637 insertions, 8587 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index b4a5d55fa19f..ec3a7149ced5 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1298,3 +1298,239 @@ Description: This node is used to set or display whether UFS WriteBooster is
(if the platform supports UFSHCD_CAP_CLK_SCALING). For a
platform that doesn't support UFSHCD_CAP_CLK_SCALING, we can
disable/enable WriteBooster through this sysfs node.
+
+What: /sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_version
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the HPB specification version.
+ The full information about the descriptor can be found in the UFS
+ HPB (Host Performance Booster) Extension specifications.
+ Example: version 1.2.3 = 0123h
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_control
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows an indication of the HPB control mode.
+ 00h: Host control mode
+ 01h: Device control mode
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_region_size
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the bHPBRegionSize which can be calculated
+ as in the following (in bytes):
+ HPB Region size = 512B * 2^bHPBRegionSize
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_number_lu
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the maximum number of HPB LU supported by
+ the device.
+ 00h: HPB is not supported by the device.
+ 01h ~ 20h: Maximum number of HPB LU supported by the device
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_subregion_size
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the bHPBSubRegionSize, which can be
+ calculated as in the following (in bytes) and shall be a multiple of
+ logical block size:
+ HPB Sub-Region size = 512B x 2^bHPBSubRegionSize
+ bHPBSubRegionSize shall not exceed bHPBRegionSize.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_max_active_regions
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the maximum number of active HPB regions that
+ is supported by the device.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_lu_max_active_regions
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the maximum number of HPB regions assigned to
+ the HPB logical unit.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_pinned_region_start_offset
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the start offset of HPB pinned region.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_number_pinned_regions
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the number of HPB pinned regions assigned to
+ the HPB logical unit.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/hpb_stats/hit_cnt
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the number of reads that changed to HPB read.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/hpb_stats/miss_cnt
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the number of reads that cannot be changed to
+ HPB read.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/hpb_stats/rb_noti_cnt
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the number of response UPIUs that have
+ recommendations for activating sub-regions and/or inactivating regions.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/hpb_stats/rb_active_cnt
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the number of active sub-regions recommended by
+ response UPIUs.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/hpb_stats/rb_inactive_cnt
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the number of inactive regions recommended by
+ response UPIUs.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/hpb_stats/map_req_cnt
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the number of read buffer commands for
+ activating sub-regions recommended by response UPIUs.
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/hpb_params/requeue_timeout_ms
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the requeue timeout threshold for write buffer
+ command in ms. The value can be changed by writing an integer to
+ this entry.
+
+What: /sys/bus/platform/drivers/ufshcd/*/attributes/max_data_size_hpb_single_cmd
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the maximum HPB data size for using a single HPB
+ command.
+
+ === ========
+ 00h 4KB
+ 01h 8KB
+ 02h 12KB
+ ...
+ FFh 1024KB
+ === ========
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/flags/hpb_enable
+Date: June 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description: This entry shows the status of HPB.
+
+ == ============================
+ 0 HPB is not enabled.
+ 1 HPB is enabled.
+ == ============================
+
+ The file is read only.
+
+What: /sys/class/scsi_device/*/device/hpb_param_sysfs/activation_thld
+Date: February 2021
+Contact: Avri Altman <avri.altman@wdc.com>
+Description: In host control mode, reads are the major source of activation
+ trials. Once this threshold is met, the region is added to the
+ "to-be-activated" list. Since we reset the read counter upon
+ write, this includes sending a rb command updating the region
+ ppn as well.
+
+What: /sys/class/scsi_device/*/device/hpb_param_sysfs/normalization_factor
+Date: February 2021
+Contact: Avri Altman <avri.altman@wdc.com>
+Description: In host control mode, we think of the regions as "buckets".
+ Those buckets are being filled with reads, and emptied on write.
+ We use entries_per_srgn - the number of blocks in a subregion as
+ our bucket size. This applies because HPB1.0 only handles
+ single-block reads. Once the bucket size is crossed, we trigger
+ a normalization work - not only to avoid overflow, but mainly
+ because we want to keep those counters normalized, as we are
+ using those reads as a comparative score, to make various decisions.
+ The normalization divides (shifts right) the read counter by
+ the normalization_factor. If during consecutive normalizations
+ an active region has exhausted its reads - inactivate it.
+
+What: /sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_enter
+Date: February 2021
+Contact: Avri Altman <avri.altman@wdc.com>
+Description: Region deactivation is often due to the fact that eviction took
+ place: A region becomes active at the expense of another. This
+ happens when the max-active-regions limit has been crossed.
+ In host mode, eviction is considered an extreme measure. We
+ want to verify that the entering region has enough reads, and
+ the exiting region has much fewer reads. eviction_thld_enter is
+ the min reads that a region must have in order to be considered
+ a candidate for evicting another region.
+
+What: /sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_exit
+Date: February 2021
+Contact: Avri Altman <avri.altman@wdc.com>
+Description: Same as above for the exiting region. A region is considered to
+ be a candidate for eviction only if it has fewer reads than
+ eviction_thld_exit.
+
+What: /sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_ms
+Date: February 2021
+Contact: Avri Altman <avri.altman@wdc.com>
+Description: In order not to hang on to "cold" regions, we inactivate
+ a region that has no READ access for a predefined amount of
+ time - read_timeout_ms. If read_timeout_ms has expired, and the
+ region is dirty, it is less likely that we can make any use of
+ HPB reading it so we inactivate it. Still, deactivation has
+ its overhead, and we may still benefit from HPB reading this
+ region if it is clean - see read_timeout_expiries.
+
+What: /sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_expiries
+Date: February 2021
+Contact: Avri Altman <avri.altman@wdc.com>
+Description: If the region read timeout has expired, but the region is clean,
+ just re-wind its timer for another spin. Do that as long as it
+ is clean and did not exhaust its read_timeout_expiries threshold.
+
+What: /sys/class/scsi_device/*/device/hpb_param_sysfs/timeout_polling_interval_ms
+Date: February 2021
+Contact: Avri Altman <avri.altman@wdc.com>
+Description: The frequency with which the delayed worker that checks the
+ read_timeouts is awakened.
+
+What: /sys/class/scsi_device/*/device/hpb_param_sysfs/inflight_map_req
+Date: February 2021
+Contact: Avri Altman <avri.altman@wdc.com>
+Description: In host control mode the host is the originator of map requests.
+ To avoid flooding the device with map requests, use a simple throttling
+ mechanism that limits the number of inflight map requests.
diff --git a/Documentation/ABI/testing/sysfs-ptp b/Documentation/ABI/testing/sysfs-ptp
index 2363ad810ddb..d378f57c1b73 100644
--- a/Documentation/ABI/testing/sysfs-ptp
+++ b/Documentation/ABI/testing/sysfs-ptp
@@ -33,6 +33,13 @@ Description:
frequency adjustment value (a positive integer) in
parts per billion.
+What: /sys/class/ptp/ptpN/max_vclocks
+Date: May 2021
+Contact: Yangbo Lu <yangbo.lu@nxp.com>
+Description:
+ This file contains the maximum number of PTP virtual clocks.
+ Write an integer to reconfigure it.
+
What: /sys/class/ptp/ptpN/n_alarms
Date: September 2010
Contact: Richard Cochran <richardcochran@gmail.com>
@@ -61,6 +68,19 @@ Description:
This file contains the number of programmable pins
offered by the PTP hardware clock.
+What: /sys/class/ptp/ptpN/n_vclocks
+Date: May 2021
+Contact: Yangbo Lu <yangbo.lu@nxp.com>
+Description:
+ This file contains the number of virtual PTP clocks in
+ use. By default, the value is 0 meaning that only the
+ physical clock is in use. Setting the value creates
+ the corresponding number of virtual clocks and causes
+ the physical clock to become free running. Setting the
+ value back to 0 deletes the virtual clocks and
+ switches the physical clock back to normal, adjustable
+ operation.
+
What: /sys/class/ptp/ptpN/pins
Date: March 2014
Contact: Richard Cochran <richardcochran@gmail.com>
diff --git a/Documentation/dev-tools/kunit/running_tips.rst b/Documentation/dev-tools/kunit/running_tips.rst
index 7d99386cf94a..d1626d548fa5 100644
--- a/Documentation/dev-tools/kunit/running_tips.rst
+++ b/Documentation/dev-tools/kunit/running_tips.rst
@@ -86,19 +86,7 @@ Generating code coverage reports under UML
.. note::
TODO(brendanhiggins@google.com): There are various issues with UML and
versions of gcc 7 and up. You're likely to run into missing ``.gcda``
- files or compile errors. We know one `faulty GCC commit
- <https://github.com/gcc-mirror/gcc/commit/8c9434c2f9358b8b8bad2c1990edf10a21645f9d>`_
- but not how we'd go about getting this fixed. The compile errors still
- need some investigation.
-
-.. note::
- TODO(brendanhiggins@google.com): for recent versions of Linux
- (5.10-5.12, maybe earlier), there's a bug with gcov counters not being
- flushed in UML. This translates to very low (<1%) reported coverage. This is
- related to the above issue and can be worked around by replacing the
- one call to ``uml_abort()`` (it's in ``os_dump_core()``) with a plain
- ``exit()``.
-
+ files or compile errors.
This is different from the "normal" way of getting coverage information that is
documented in Documentation/dev-tools/gcov.rst.
diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml
index 8dc7b404ee12..1174c9aa9934 100644
--- a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml
+++ b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml
@@ -50,7 +50,6 @@ properties:
reg:
minItems: 1
- maxItems: 3
items:
- description: base register
- description: power register
diff --git a/Documentation/devicetree/bindings/display/renesas,du.yaml b/Documentation/devicetree/bindings/display/renesas,du.yaml
index 5f4345d43020..e3ca5389c17d 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.yaml
+++ b/Documentation/devicetree/bindings/display/renesas,du.yaml
@@ -92,7 +92,6 @@ required:
- reg
- clocks
- interrupts
- - resets
- ports
allOf:
diff --git a/Documentation/devicetree/bindings/hwmon/adt7475.yaml b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
index ad0ec9f35bd8..7d9c083632b9 100644
--- a/Documentation/devicetree/bindings/hwmon/adt7475.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
@@ -39,17 +39,7 @@ properties:
reg:
maxItems: 1
-patternProperties:
- "^adi,bypass-attenuator-in[0-4]$":
- description: |
- Configures bypassing the individual voltage input attenuator. If
- set to 1 the attenuator is bypassed if set to 0 the attenuator is
- not bypassed. If the property is absent then the attenuator
- retains it's configuration from the bios/bootloader.
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1]
-
- "^adi,pwm-active-state$":
+ adi,pwm-active-state:
description: |
Integer array, represents the active state of the pwm outputs If set to 0
the pwm uses a logic low output for 100% duty cycle. If set to 1 the pwm
@@ -61,6 +51,16 @@ patternProperties:
enum: [0, 1]
default: 1
+patternProperties:
+ "^adi,bypass-attenuator-in[0-4]$":
+ description: |
+ Configures bypassing the individual voltage input attenuator. If
+ set to 1 the attenuator is bypassed if set to 0 the attenuator is
+ not bypassed. If the property is absent then the attenuator
+ retains its configuration from the bios/bootloader.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 1181b590db71..03f2b2d4db30 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -52,16 +52,14 @@ properties:
items:
- const: marvell,ap806-smmu-500
- const: arm,mmu-500
- - description: NVIDIA SoCs that program two ARM MMU-500s identically
- items:
- description: NVIDIA SoCs that require memory controller interaction
and may program multiple ARM MMU-500s identically with the memory
controller interleaving translations between multiple instances
for improved performance.
items:
- enum:
- - const: nvidia,tegra194-smmu
- - const: nvidia,tegra186-smmu
+ - nvidia,tegra194-smmu
+ - nvidia,tegra186-smmu
- const: nvidia,smmu-500
- items:
- const: arm,mmu-500
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
index d2e28a9e3545..ba9124f721f1 100644
--- a/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
@@ -28,14 +28,12 @@ properties:
- description: configuration registers for MMU instance 0
- description: configuration registers for MMU instance 1
minItems: 1
- maxItems: 2
interrupts:
items:
- description: interruption for MMU instance 0
- description: interruption for MMU instance 1
minItems: 1
- maxItems: 2
clocks:
items:
diff --git a/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml
index 7a63c85ef8c5..01c9acf9275d 100644
--- a/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml
@@ -57,7 +57,6 @@ properties:
ranges:
minItems: 1
- maxItems: 3
description: |
Memory bus areas for interacting with the devices. Reflects
the memory layout with four integer values following:
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml
index e5f1a33332a5..dd5a64969e37 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml
@@ -84,7 +84,6 @@ properties:
interrupts:
minItems: 1
- maxItems: 3
items:
- description: NAND CTLRDY interrupt
- description: FLASH_DMA_DONE if flash DMA is available
@@ -92,7 +91,6 @@ properties:
interrupt-names:
minItems: 1
- maxItems: 3
items:
- const: nand_ctlrdy
- const: flash_dma_done
@@ -148,8 +146,6 @@ allOf:
then:
properties:
reg-names:
- minItems: 2
- maxItems: 2
items:
- const: nand
- const: nand-int-base
@@ -161,8 +157,6 @@ allOf:
then:
properties:
reg-names:
- minItems: 3
- maxItems: 3
items:
- const: nand
- const: nand-int-base
@@ -175,8 +169,6 @@ allOf:
then:
properties:
reg-names:
- minItems: 3
- maxItems: 3
items:
- const: nand
- const: iproc-idm
diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
index 0b8a05dd52e6..f978f8719d8e 100644
--- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
@@ -67,8 +67,8 @@ properties:
reg:
oneOf:
- enum:
- - 0
- - 1
+ - 0
+ - 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/net/gpmc-eth.txt b/Documentation/devicetree/bindings/net/gpmc-eth.txt
index f7da3d73ca1b..32821066a85b 100644
--- a/Documentation/devicetree/bindings/net/gpmc-eth.txt
+++ b/Documentation/devicetree/bindings/net/gpmc-eth.txt
@@ -13,7 +13,7 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
For the properties relevant to the ethernet controller connected to the GPMC
refer to the binding documentation of the device. For example, the documentation
-for the SMSC 911x is Documentation/devicetree/bindings/net/smsc911x.txt
+for the SMSC 911x is Documentation/devicetree/bindings/net/smsc,lan9115.yaml
Child nodes need to specify the GPMC bus address width using the "bank-width"
property but is possible that an ethernet controller also has a property to
diff --git a/Documentation/devicetree/bindings/net/smsc,lan9115.yaml b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml
new file mode 100644
index 000000000000..f86667cbcca8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/smsc,lan9115.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller
+
+maintainers:
+ - Shawn Guo <shawnguo@kernel.org>
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: smsc,lan9115
+ - items:
+ - enum:
+ - smsc,lan89218
+ - smsc,lan9117
+ - smsc,lan9118
+ - smsc,lan9220
+ - smsc,lan9221
+ - const: smsc,lan9115
+
+ reg:
+ maxItems: 1
+
+ reg-shift: true
+
+ reg-io-width:
+ enum: [ 2, 4 ]
+ default: 2
+
+ interrupts:
+ minItems: 1
+ items:
+ - description:
+ LAN interrupt line
+ - description:
+ Optional PME (power management event) interrupt that is able to wake
+ up the host system with a 50ms pulse on network activity
+
+ clocks:
+ maxItems: 1
+
+ phy-mode: true
+
+ smsc,irq-active-high:
+ type: boolean
+ description: Indicates the IRQ polarity is active-high
+
+ smsc,irq-push-pull:
+ type: boolean
+ description: Indicates the IRQ type is push-pull
+
+ smsc,force-internal-phy:
+ type: boolean
+ description: Forces SMSC LAN controller to use internal PHY
+
+ smsc,force-external-phy:
+ type: boolean
+ description: Forces SMSC LAN controller to use external PHY
+
+ smsc,save-mac-address:
+ type: boolean
+ description:
+ Indicates that MAC address needs to be saved before resetting the
+ controller
+
+ reset-gpios:
+ maxItems: 1
+ description:
+ A GPIO line connected to the RESET (active low) signal of the device.
+ On many systems this is wired high so the device goes out of reset at
+ power-on, but if it is under program control, this optional GPIO can
+ wake up in response to it.
+
+ vdd33a-supply:
+ description: 3.3V analog power supply
+
+ vddvario-supply:
+ description: IO logic power supply
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+# There are lots of bus-specific properties ("qcom,*", "samsung,*", "fsl,*",
+# "gpmc,*", ...) to be found, that actually depend on the compatible value of
+# the parent node.
+additionalProperties: true
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ ethernet@f4000000 {
+ compatible = "smsc,lan9220", "smsc,lan9115";
+ reg = <0xf4000000 0x2000000>;
+ phy-mode = "mii";
+ interrupt-parent = <&gpio1>;
+ interrupts = <31>, <32>;
+ reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
+ reg-io-width = <4>;
+ smsc,irq-push-pull;
+ };
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
deleted file mode 100644
index acfafc8e143c..000000000000
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-* Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller
-
-Required properties:
-- compatible : Should be "smsc,lan<model>", "smsc,lan9115"
-- reg : Address and length of the io space for SMSC LAN
-- interrupts : one or two interrupt specifiers
- - The first interrupt is the SMSC LAN interrupt line
- - The second interrupt (if present) is the PME (power
- management event) interrupt that is able to wake up the host
- system with a 50ms pulse on network activity
-- phy-mode : See ethernet.txt file in the same directory
-
-Optional properties:
-- reg-shift : Specify the quantity to shift the register offsets by
-- reg-io-width : Specify the size (in bytes) of the IO accesses that
- should be performed on the device. Valid value for SMSC LAN is
- 2 or 4. If it's omitted or invalid, the size would be 2.
-- smsc,irq-active-high : Indicates the IRQ polarity is active-high
-- smsc,irq-push-pull : Indicates the IRQ type is push-pull
-- smsc,force-internal-phy : Forces SMSC LAN controller to use
- internal PHY
-- smsc,force-external-phy : Forces SMSC LAN controller to use
- external PHY
-- smsc,save-mac-address : Indicates that mac address needs to be saved
- before resetting the controller
-- reset-gpios : a GPIO line connected to the RESET (active low) signal
- of the device. On many systems this is wired high so the device goes
- out of reset at power-on, but if it is under program control, this
- optional GPIO can wake up in response to it.
-- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
-
-Examples:
-
-lan9220@f4000000 {
- compatible = "smsc,lan9220", "smsc,lan9115";
- reg = <0xf4000000 0x2000000>;
- phy-mode = "mii";
- interrupt-parent = <&gpio1>;
- interrupts = <31>, <32>;
- reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
- reg-io-width = <4>;
- smsc,irq-push-pull;
-};
diff --git a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
index 5272b6f284ba..dcd63908aeae 100644
--- a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
+++ b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
@@ -77,6 +77,34 @@ properties:
Type-C spec states minimum CC pin debounce of 100 ms and maximum
of 200 ms. However, some solutions might need more than 200 ms.
+ refclk-dig:
+ type: object
+ description: |
+ WIZ node should have subnode for refclk_dig to select the reference
+ clock source for the reference clock used in the PHY and PMA digital
+ logic.
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 4
+ description: Phandle to two (Torrent) or four (Sierra) clock nodes representing
+ the inputs to refclk_dig
+
+ "#clock-cells":
+ const: 0
+
+ assigned-clocks:
+ maxItems: 1
+
+ assigned-clock-parents:
+ maxItems: 1
+
+ required:
+ - clocks
+ - "#clock-cells"
+ - assigned-clocks
+ - assigned-clock-parents
+
patternProperties:
"^pll[0|1]-refclk$":
type: object
@@ -121,34 +149,6 @@ patternProperties:
- clocks
- "#clock-cells"
- "^refclk-dig$":
- type: object
- description: |
- WIZ node should have subnode for refclk_dig to select the reference
- clock source for the reference clock used in the PHY and PMA digital
- logic.
- properties:
- clocks:
- minItems: 2
- maxItems: 4
- description: Phandle to two (Torrent) or four (Sierra) clock nodes representing
- the inputs to refclk_dig
-
- "#clock-cells":
- const: 0
-
- assigned-clocks:
- maxItems: 1
-
- assigned-clock-parents:
- maxItems: 1
-
- required:
- - clocks
- - "#clock-cells"
- - assigned-clocks
- - assigned-clock-parents
-
"^serdes@[0-9a-f]+$":
type: object
description: |
diff --git a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
index 12b8963615c3..c2e8c54e5311 100644
--- a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
+++ b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
@@ -36,12 +36,12 @@ properties:
switching frequency must be one of following corresponding value
1.1MHz, 1.65MHz, 2.2MHz, 2.75MHz
- patternProperties:
- "^ldo[1-4]$":
+ ldortc:
type: object
$ref: regulator.yaml#
- "^ldortc$":
+ patternProperties:
+ "^ldo[1-4]$":
type: object
$ref: regulator.yaml#
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
index 8761437ed8ad..aabf50f5b39e 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
@@ -83,7 +83,8 @@ properties:
unevaluatedProperties: false
- "^vsnvs$":
+ properties:
+ vsnvs:
type: object
$ref: regulator.yaml#
description:
diff --git a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml
index 657c13b62b67..056d42daae06 100644
--- a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml
+++ b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml
@@ -30,7 +30,6 @@ properties:
maxItems: 1
clocks:
- minItems: 2
items:
- description: PCLK clocks
- description: EXTCLK clocks. Faraday calls it CLK1HZ and says the clock
diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml
index faef4f6f55b8..8246891602e7 100644
--- a/Documentation/devicetree/bindings/spi/spi-controller.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml
@@ -79,22 +79,7 @@ properties:
description:
The SPI controller acts as a slave, instead of a master.
-allOf:
- - if:
- not:
- required:
- - spi-slave
- then:
- properties:
- "#address-cells":
- const: 1
- else:
- properties:
- "#address-cells":
- const: 0
-
-patternProperties:
- "^slave$":
+ slave:
type: object
properties:
@@ -105,6 +90,7 @@ patternProperties:
required:
- compatible
+patternProperties:
"^.*@[0-9a-f]+$":
type: object
@@ -180,6 +166,20 @@ patternProperties:
- compatible
- reg
+allOf:
+ - if:
+ not:
+ required:
+ - spi-slave
+ then:
+ properties:
+ "#address-cells":
+ const: 1
+ else:
+ properties:
+ "#address-cells":
+ const: 0
+
additionalProperties: true
examples:
diff --git a/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
index a88f99adfe8e..f238848ad094 100644
--- a/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
+++ b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
@@ -25,14 +25,12 @@ properties:
interrupts:
minItems: 1
- maxItems: 2
items:
- description: Host controller interrupt
- description: Device controller interrupt in isp1761
interrupt-names:
minItems: 1
- maxItems: 2
items:
- const: host
- const: peripheral
diff --git a/Documentation/driver-api/early-userspace/early_userspace_support.rst b/Documentation/driver-api/early-userspace/early_userspace_support.rst
index 8a58c61932ff..61bdeac1bae5 100644
--- a/Documentation/driver-api/early-userspace/early_userspace_support.rst
+++ b/Documentation/driver-api/early-userspace/early_userspace_support.rst
@@ -69,17 +69,17 @@ early userspace image can be built by an unprivileged user.
As a technical note, when directories and files are specified, the
entire CONFIG_INITRAMFS_SOURCE is passed to
-usr/gen_initramfs_list.sh. This means that CONFIG_INITRAMFS_SOURCE
+usr/gen_initramfs.sh. This means that CONFIG_INITRAMFS_SOURCE
can really be interpreted as any legal argument to
-gen_initramfs_list.sh. If a directory is specified as an argument then
+gen_initramfs.sh. If a directory is specified as an argument then
the contents are scanned, uid/gid translation is performed, and
usr/gen_init_cpio file directives are output. If a directory is
-specified as an argument to usr/gen_initramfs_list.sh then the
+specified as an argument to usr/gen_initramfs.sh then the
contents of the file are simply copied to the output. All of the output
directives from directory scanning and file contents copying are
processed by usr/gen_init_cpio.
-See also 'usr/gen_initramfs_list.sh -h'.
+See also 'usr/gen_initramfs.sh -h'.
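+
+As a hypothetical illustration (paths, modes and ownership are example
+values only), a file passed via CONFIG_INITRAMFS_SOURCE could contain
+the following gen_init_cpio directives::
+
+  dir  /dev 0755 0 0
+  nod  /dev/console 0600 0 0 c 5 1
+  dir  /root 0700 0 0
+  file /init usr/my_init 0755 0 0
+
+A directory argument produces an equivalent directive stream
+automatically, with the uid/gid translation described above applied.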
Where's this all leading?
=========================
diff --git a/Documentation/features/core/thread-info-in-task/arch-support.txt b/Documentation/features/core/thread-info-in-task/arch-support.txt
new file mode 100644
index 000000000000..9f0259bbd7df
--- /dev/null
+++ b/Documentation/features/core/thread-info-in-task/arch-support.txt
@@ -0,0 +1,32 @@
+#
+# Feature name: thread-info-in-task
+# Kconfig: THREAD_INFO_IN_TASK
+# description: arch makes use of the core kernel facility to embed thread_info in task_struct
+#
+ -----------------------
+ | arch |status|
+ -----------------------
+ | alpha: | TODO |
+ | arc: | TODO |
+ | arm: | TODO |
+ | arm64: | ok |
+ | csky: | TODO |
+ | h8300: | TODO |
+ | hexagon: | TODO |
+ | ia64: | TODO |
+ | m68k: | TODO |
+ | microblaze: | TODO |
+ | mips: | TODO |
+ | nds32: | ok |
+ | nios2: | TODO |
+ | openrisc: | TODO |
+ | parisc: | TODO |
+ | powerpc: | ok |
+ | riscv: | ok |
+ | s390: | ok |
+ | sh: | TODO |
+ | sparc: | TODO |
+ | um: | TODO |
+ | x86: | ok |
+ | xtensa: | TODO |
+ -----------------------
diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
index 8639fe8315f5..8dcaab070c7b 100644
--- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt
+++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
@@ -22,7 +22,7 @@
| openrisc: | TODO |
| parisc: | TODO |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | TODO |
| sh: | ok |
| sparc: | TODO |
diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.rst b/Documentation/filesystems/ramfs-rootfs-initramfs.rst
index 4598b0d90b60..164960631925 100644
--- a/Documentation/filesystems/ramfs-rootfs-initramfs.rst
+++ b/Documentation/filesystems/ramfs-rootfs-initramfs.rst
@@ -170,7 +170,7 @@ Documentation/driver-api/early-userspace/early_userspace_support.rst for more de
The kernel does not depend on external cpio tools. If you specify a
directory instead of a configuration file, the kernel's build infrastructure
creates a configuration file from that directory (usr/Makefile calls
-usr/gen_initramfs_list.sh), and proceeds to package up that directory
+usr/gen_initramfs.sh), and proceeds to package up that directory
using the config file (by feeding it to usr/gen_init_cpio, which is created
from usr/gen_init_cpio.c). The kernel's build-time cpio creation code is
entirely self-contained, and the kernel's boot-time extractor is also
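
For instance, pointing the option at a staging directory (the path is a hypothetical example):

  CONFIG_INITRAMFS_SOURCE="/home/user/rootfs_staging"

makes usr/gen_initramfs.sh scan that tree and emit one gen_init_cpio directive per entry before the archive is packed.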
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 6ea91e41593f..c86628e6a235 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -212,6 +212,7 @@ Userspace to kernel:
``ETHTOOL_MSG_FEC_SET`` set FEC settings
``ETHTOOL_MSG_MODULE_EEPROM_GET`` read SFP module EEPROM
``ETHTOOL_MSG_STATS_GET`` get standard statistics
+ ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` get PHC virtual clocks info
===================================== ================================
Kernel to userspace:
@@ -250,6 +251,7 @@ Kernel to userspace:
``ETHTOOL_MSG_FEC_NTF`` FEC settings
``ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY`` read SFP module EEPROM
``ETHTOOL_MSG_STATS_GET_REPLY`` standard statistics
+ ``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY`` PHC virtual clocks info
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -1477,6 +1479,25 @@ Low and high bounds are inclusive, for example:
etherStatsPkts512to1023Octets 512 1023
============================= ==== ====
+PHC_VCLOCKS_GET
+===============
+
+Query device PHC virtual clocks information.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested reply header
+ ``ETHTOOL_A_PHC_VCLOCKS_NUM`` u32 PHC virtual clocks number
+ ``ETHTOOL_A_PHC_VCLOCKS_INDEX`` s32 PHC index array
+ ==================================== ====== ==========================
+
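+As a worked example (values hypothetical): a device whose physical clock
+has PHC index 0 and which carries two virtual clocks on top of it could
+reply with ``ETHTOOL_A_PHC_VCLOCKS_NUM`` = 2 and an
+``ETHTOOL_A_PHC_VCLOCKS_INDEX`` array of {1, 2}, the indices referring
+to the usual PTP clock namespace (i.e. /dev/ptp1 and /dev/ptp2).
+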
Request translation
===================
@@ -1575,4 +1596,5 @@ are netlink only.
n/a ``ETHTOOL_MSG_CABLE_TEST_ACT``
n/a ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``
n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET``
+ n/a ``ETHTOOL_MSG_PHC_VCLOCKS_GET``
=================================== =====================================
diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index 0467b30e4abe..d31ed6c1cb0d 100644
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -110,6 +110,12 @@ nf_conntrack_tcp_be_liberal - BOOLEAN
Be conservative in what you do, be liberal in what you accept from others.
If it's non-zero, we mark only out of window RST segments as INVALID.
+nf_conntrack_tcp_ignore_invalid_rst - BOOLEAN
+ - 0 - disabled (default)
+ - 1 - enabled
+
+ If it's 1, we don't mark out of window RST segments as INVALID.
+
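+ A usage sketch, via the standard sysctl interface::
+
+   sysctl -w net.netfilter.nf_conntrack_tcp_ignore_invalid_rst=1
+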
nf_conntrack_tcp_loose - BOOLEAN
- 0 - disabled
- not 0 - enabled (default)
diff --git a/Documentation/networking/tipc.rst b/Documentation/networking/tipc.rst
index 76775f24cdc8..ab63d298cca2 100644
--- a/Documentation/networking/tipc.rst
+++ b/Documentation/networking/tipc.rst
@@ -4,10 +4,125 @@
Linux Kernel TIPC
=================
-TIPC (Transparent Inter Process Communication) is a protocol that is
-specially designed for intra-cluster communication.
+Introduction
+============
-For more information about TIPC, see http://tipc.sourceforge.net.
+TIPC (Transparent Inter Process Communication) is a protocol that is specially
+designed for intra-cluster communication. It can be configured to transmit
+messages either on UDP or directly across Ethernet. Message delivery is
+sequence guaranteed, loss free and flow controlled. Latency times are shorter
+than with any other known protocol, while maximal throughput is comparable to
+that of TCP.
+
+TIPC Features
+-------------
+
+- Cluster wide IPC service
+
+ Have you ever wished you had the convenience of Unix Domain Sockets even when
+ transmitting data between cluster nodes? Where you yourself determine the
+ addresses you want to bind to and use? Where you don't have to perform DNS
+ lookups and worry about IP addresses? Where you don't have to start timers
+ to monitor the continuous existence of peer sockets? And yet without the
+ downsides of that socket type, such as the risk of lingering inodes?
+
+ Welcome to the Transparent Inter Process Communication service, TIPC in short,
+ which gives you all of this, and a lot more.
+
+- Service Addressing
+
+ A fundamental concept in TIPC is that of Service Addressing which makes it
+ possible for a programmer to choose their own address, bind it to a server
+ socket and let client programs use only that address for sending messages.
+
+- Service Tracking
+
+ A client wanting to wait for the availability of a server uses the Service
+ Tracking mechanism to subscribe for binding and unbinding/close events for
+ sockets with the associated service address.
+
+ The service tracking mechanism can also be used for Cluster Topology Tracking,
+ i.e., subscribing for availability/non-availability of cluster nodes.
+
+ Likewise, the service tracking mechanism can be used for Cluster Connectivity
+ Tracking, i.e., subscribing for up/down events for individual links between
+ cluster nodes.
+
+- Transmission Modes
+
+ Using a service address, a client can send datagram messages to a server
+ socket (a minimal sketch appears under Implementation below).
+
+ Using the same address type, it can establish a connection towards an accepting
+ server socket.
+
+ It can also use a service address to create and join a Communication Group,
+ which is the TIPC manifestation of a brokerless message bus.
+
+ Multicast with very good performance and scalability is available both in
+ datagram mode and in communication group mode.
+
+- Inter Node Links
+
+ Communication between any two nodes in a cluster is maintained by one or two
+ Inter Node Links, which both guarantee data traffic integrity and monitor
+ the peer node's availability.
+
+- Cluster Scalability
+
+ By applying the Overlapping Ring Monitoring algorithm on the inter node links
+ it is possible to scale TIPC clusters up to 1000 nodes with a maintained
+ neighbor failure discovery time of 1-2 seconds. For smaller clusters this
+ time can be made much shorter.
+
+- Neighbor Discovery
+
+ Neighbor Node Discovery in the cluster is done by Ethernet broadcast or UDP
+ multicast, when either of those services is available. If not, configured
+ peer IP addresses can be used.
+
+- Configuration
+
+ When running TIPC in single-node mode, no configuration whatsoever is needed.
+ When running in cluster mode, TIPC must at a minimum be given a node address
+ (before Linux 4.17) and told which interface to attach to. The "tipc"
+ configuration tool makes it possible to add and maintain many more
+ configuration parameters.
+
+- Performance
+
+ TIPC message transfer latency times are better than in any other known protocol.
+ Maximal byte throughput for inter-node connections is still somewhat lower than
+ for TCP, while it is superior for intra-node and inter-container throughput
+ on the same host.
+
+- Language Support
+
+ The TIPC user API has support for C, Python, Perl, Ruby, D and Go.
+
+More Information
+----------------
+
+- How to set up TIPC:
+
+ http://tipc.io/getting_started.html
+
+- How to program with TIPC:
+
+ http://tipc.io/programming.html
+
+- How to contribute to TIPC:
+
+ http://tipc.io/contacts.html
+
+- More details about the TIPC specification:
+
+ http://tipc.io/protocol.html
+
+
+Implementation
+==============
+
+TIPC is implemented as a kernel module in the net/tipc/ directory.
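+
+As a minimal sketch of service addressing in datagram mode (service type
+18888 and instance 17 are arbitrary example values, not anything TIPC
+reserves), a client could send one message to a bound server socket as
+follows::
+
+  #include <stdio.h>
+  #include <sys/socket.h>
+  #include <linux/tipc.h>
+
+  int main(void)
+  {
+          struct sockaddr_tipc server = { 0 };
+          const char msg[] = "hello";
+          int sd;
+
+          server.family = AF_TIPC;
+          server.addrtype = TIPC_ADDR_NAME;     /* single service address */
+          server.addr.name.name.type = 18888;   /* example service type */
+          server.addr.name.name.instance = 17;  /* example instance */
+          server.addr.name.domain = 0;          /* look up cluster-wide */
+
+          sd = socket(AF_TIPC, SOCK_RDM, 0);    /* reliable datagram socket */
+          if (sd < 0) {
+                  perror("socket");
+                  return 1;
+          }
+          if (sendto(sd, msg, sizeof(msg), 0,
+                     (struct sockaddr *)&server, sizeof(server)) < 0)
+                  perror("sendto");
+          return 0;
+  }
+
+A server would bind() to the same service address (typically as a range,
+using addrtype TIPC_ADDR_NAMESEQ) before the client transmits; no IP
+address or DNS lookup is involved at either end.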
TIPC Base Types
---------------
diff --git a/Documentation/translations/zh_CN/process/2.Process.rst b/Documentation/translations/zh_CN/process/2.Process.rst
index 229629e305ca..4a6ed0219494 100644
--- a/Documentation/translations/zh_CN/process/2.Process.rst
+++ b/Documentation/translations/zh_CN/process/2.Process.rst
@@ -47,7 +47,7 @@
(顺便说一句,值得注意的是,合并窗口期间集成的更改并不是凭空产生的;它们是经
提前收集、测试和分级的。稍后将详细描述该过程的工作方式。)
-合并窗口持续大约两周。在这段时间结束时,LinusTorvalds将声明窗口已关闭,并
+合并窗口持续大约两周。在这段时间结束时,Linus Torvalds将声明窗口已关闭,并
释放第一个“rc”内核。例如,对于目标为5.6的内核,在合并窗口结束时发生的释放
将被称为5.6-rc1。-rc1 版本是一个信号,表示合并新特性的时间已经过去,稳定下一
个内核的时间已经到来。
@@ -168,7 +168,7 @@ Greg Kroah-Hartman领导。稳定团队将使用5.x.y编号方案不定期地发
补丁如何进入内核
----------------
-只有一个人可以将补丁合并到主线内核存储库中:LinusTorvalds。但是,在进入
+只有一个人可以将补丁合并到主线内核存储库中:Linus Torvalds。但是,在进入
2.6.38内核的9500多个补丁中,只有112个(大约1.3%)是由Linus自己直接选择的。
内核项目已经发展到一个没有一个开发人员可以在没有支持的情况下检查和选择每个
补丁的规模。内核开发人员处理这种增长的方式是使用围绕信任链构建的助理系统。
diff --git a/LICENSES/dual/CC-BY-4.0 b/LICENSES/dual/CC-BY-4.0
index 45a81b8e4669..869cad3d1643 100644
--- a/LICENSES/dual/CC-BY-4.0
+++ b/LICENSES/dual/CC-BY-4.0
@@ -392,7 +392,7 @@ Section 8 -- Interpretation.
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
-will be considered the “Licensor.” The text of the Creative Commons
+will be considered the "Licensor." The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
diff --git a/MAINTAINERS b/MAINTAINERS
index a61f4f3b78a9..6c8be735cc91 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -933,6 +933,7 @@ F: drivers/video/fbdev/geode/
AMD IOMMU (AMD-VI)
M: Joerg Roedel <joro@8bytes.org>
+R: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
L: iommu@lists.linux-foundation.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
@@ -15009,6 +15010,13 @@ F: drivers/net/phy/dp83640*
F: drivers/ptp/*
F: include/linux/ptp_cl*
+PTP VIRTUAL CLOCK SUPPORT
+M: Yangbo Lu <yangbo.lu@nxp.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/ptp/ptp_vclock.c
+F: net/ethtool/phc_vclocks.c
+
PTRACE SUPPORT
M: Oleg Nesterov <oleg@redhat.com>
S: Maintained
diff --git a/Makefile b/Makefile
index c3f9bd191b89..e4f5895badb5 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Opossums on Parade
# *DOCUMENTATION*
@@ -728,11 +728,12 @@ $(KCONFIG_CONFIG):
# This exploits the 'multi-target pattern rule' trick.
# The syncconfig should be executed only once to make all the targets.
# (Note: use the grouped target '&:' when we bump to GNU Make 4.3)
-quiet_cmd_syncconfig = SYNC $@
- cmd_syncconfig = $(MAKE) -f $(srctree)/Makefile syncconfig
-
+#
+# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig,
+# so you would not notice that Kconfig is waiting for user input.
%/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG)
- +$(call cmd,syncconfig)
+ $(Q)$(kecho) " SYNC $@"
+ $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
else # !may-sync-config
# External modules and some install targets need include/generated/autoconf.h
# and include/config/auto.conf but do not care if they are up-to-date.
@@ -802,7 +803,7 @@ else
# Warn about unmarked fall-throughs in switch statement.
# Disabled for clang while comment to attribute conversion happens and
# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
-KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
+KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=5,)
endif
# These warnings generated too much noise in a regular build.
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3ea1c417339f..82f908fa5676 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -395,7 +395,7 @@ config ARCH_IXP4XX
select IXP4XX_IRQ
select IXP4XX_TIMER
# With the new PCI driver this is not needed
- select NEED_MACH_IO_H if PCI_IXP4XX_LEGACY
+ select NEED_MACH_IO_H if IXP4XX_PCI_LEGACY
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
help
diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
index 33e413ca07e4..9b4cf5ebe6d5 100644
--- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
@@ -4,6 +4,7 @@
#include "aspeed-g5.dtsi"
#include <dt-bindings/gpio/aspeed-gpio.h>
#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/interrupt-controller/irq.h>
/{
model = "ASRock E3C246D4I BMC";
@@ -73,7 +74,8 @@
&vuart {
status = "okay";
- aspeed,sirq-active-high;
+ aspeed,lpc-io-reg = <0x2f8>;
+ aspeed,lpc-interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
};
&mac0 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
index d26a9e16ff7c..aa24cac8e5be 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
@@ -406,14 +406,14 @@
reg = <0x69>;
};
- power-supply@6a {
+ power-supply@6b {
compatible = "ibm,cffps";
- reg = <0x6a>;
+ reg = <0x6b>;
};
- power-supply@6b {
+ power-supply@6d {
compatible = "ibm,cffps";
- reg = <0x6b>;
+ reg = <0x6d>;
};
};
@@ -2832,6 +2832,7 @@
&emmc {
status = "okay";
+ clk-phase-mmc-hs200 = <180>, <180>;
};
&fsim0 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
index 941c0489479a..481d0ee1f85f 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
@@ -280,10 +280,7 @@
/*W0-W7*/ "","","","","","","","",
/*X0-X7*/ "","","","","","","","",
/*Y0-Y7*/ "","","","","","","","",
- /*Z0-Z7*/ "","","","","","","","",
- /*AA0-AA7*/ "","","","","","","","",
- /*AB0-AB7*/ "","","","","","","","",
- /*AC0-AC7*/ "","","","","","","","";
+ /*Z0-Z7*/ "","","","","","","","";
pin_mclr_vpp {
gpio-hog;
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
index e863ec088970..e33153dcaea8 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
@@ -136,10 +136,7 @@
/*W0-W7*/ "","","","","","","","",
/*X0-X7*/ "","","","","","","","",
/*Y0-Y7*/ "","","","","","","","",
- /*Z0-Z7*/ "","","","","","","","",
- /*AA0-AA7*/ "","","","","","","","",
- /*AB0-AB7*/ "","","","","","","","",
- /*AC0-AC7*/ "","","","","","","","";
+ /*Z0-Z7*/ "","","","","","","","";
};
&fmc {
@@ -189,6 +186,7 @@
&emmc {
status = "okay";
+ clk-phase-mmc-hs200 = <36>, <270>;
};
&fsim0 {
diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
index dace8ffeb991..0a4ffd10c484 100644
--- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
@@ -581,7 +581,7 @@
* EBI2. This has a 25MHz chrystal next to it, so no
* clocking is needed.
*/
- ethernet-ebi2@2,0 {
+ ethernet@2,0 {
compatible = "smsc,lan9221", "smsc,lan9115";
reg = <2 0x0 0x100>;
/*
@@ -598,8 +598,6 @@
phy-mode = "mii";
reg-io-width = <2>;
smsc,force-external-phy;
- /* IRQ on edge falling = active low */
- smsc,irq-active-low;
smsc,irq-push-pull;
/*
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 37bd41ff8dff..151c0220047d 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -195,16 +195,15 @@
#size-cells = <1>;
ranges;
- vic: intc@10140000 {
+ vic: interrupt-controller@10140000 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x10140000 0x1000>;
- clear-mask = <0xffffffff>;
valid-mask = <0xffffffff>;
};
- sic: intc@10003000 {
+ sic: interrupt-controller@10003000 {
compatible = "arm,versatile-sic";
interrupt-controller;
#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 06a0fdf24026..e7e751a858d8 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -7,7 +7,7 @@
amba {
/* The Versatile PB is using more SIC IRQ lines than the AB */
- sic: intc@10003000 {
+ sic: interrupt-controller@10003000 {
clear-mask = <0xffffffff>;
/*
* Valid interrupt lines mask according to
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index b06e537d5149..4dfe321a79f6 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -57,10 +57,7 @@ CONFIG_DRM=y
CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_MATROX=y
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_LOGO=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 52a0400fdd92..d9abaae118dd 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -821,7 +821,7 @@ CONFIG_USB_ISP1760=y
CONFIG_USB_HSIC_USB3503=y
CONFIG_AB8500_USB=y
CONFIG_KEYSTONE_USB_PHY=m
-CONFIG_NOP_USB_XCEIV=m
+CONFIG_NOP_USB_XCEIV=y
CONFIG_AM335X_PHY_USB=m
CONFIG_TWL6030_USB=m
CONFIG_USB_GPIO_VBUS=y
diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig
index 483c400dd391..4c01e313099f 100644
--- a/arch/arm/configs/realview_defconfig
+++ b/arch/arm/configs/realview_defconfig
@@ -64,11 +64,9 @@ CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_DRIVERS is not set
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 66c8b0980a0a..d9a27e4e0914 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -135,6 +135,7 @@ CONFIG_DRM_SII902X=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_FB=y
CONFIG_FB_SH_MOBILE_LCDC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_AS3711=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index dbb1ef601762..3b30913d7d8d 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -61,6 +61,10 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_TOUCHSCREEN_BU21013=y
CONFIG_TOUCHSCREEN_CY8CTMA140=y
+CONFIG_TOUCHSCREEN_CYTTSP_CORE=y
+CONFIG_TOUCHSCREEN_CYTTSP_SPI=y
+CONFIG_TOUCHSCREEN_MMS114=y
+CONFIG_TOUCHSCREEN_ZINITIX=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_AB8500_PONKEY=y
CONFIG_INPUT_GPIO_VIBRA=y
@@ -100,6 +104,7 @@ CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=y
CONFIG_DRM_PANEL_SONY_ACX424AKP=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_MCDE=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_KTD253=y
CONFIG_BACKLIGHT_GPIO=y
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig
index e7ecfb365e91..b703f4757021 100644
--- a/arch/arm/configs/versatile_defconfig
+++ b/arch/arm/configs/versatile_defconfig
@@ -60,7 +60,7 @@ CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LOGO=y
CONFIG_SOUND=y
@@ -88,8 +88,6 @@ CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
-CONFIG_FONTS=y
-CONFIG_FONT_ACORN_8x8=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index 4479369540f2..b5e246dd23f4 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -11,9 +11,6 @@ CONFIG_CPUSETS=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VEXPRESS_DCSCB=y
CONFIG_ARCH_VEXPRESS_TC2_PM=y
@@ -23,14 +20,17 @@ CONFIG_MCPM=y
CONFIG_VMSPLIT_2G=y
CONFIG_NR_CPUS=8
CONFIG_ARM_PSCI=y
-CONFIG_CMA=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyAMA0"
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -43,7 +43,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_DEVTMPFS=y
-CONFIG_DMA_CMA=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
@@ -59,7 +58,6 @@ CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_VIRTIO=y
CONFIG_ATA=y
-# CONFIG_SATA_PMP is not set
CONFIG_NETDEVICES=y
CONFIG_VIRTIO_NET=y
CONFIG_SMC91X=y
@@ -81,11 +79,9 @@ CONFIG_DRM=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_SII902X=y
CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_DRIVERS is not set
@@ -136,10 +132,11 @@ CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_DMA_CMA=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_USER=y
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e07e7de9ac49..b5b13a932561 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1605,7 +1605,8 @@ config ARM64_BTI_KERNEL
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
depends on !CC_IS_GCC || GCC_VERSION >= 100100
- depends on !(CC_IS_CLANG && GCOV_KERNEL)
+ # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
+ depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
Build the kernel with Branch Target Identification annotations
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index b7d532841390..076d5efc4c3d 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -948,6 +948,10 @@
<&bpmp TEGRA194_CLK_XUSB_SS>,
<&bpmp TEGRA194_CLK_XUSB_FS>;
clock-names = "dev", "ss", "ss_src", "fs_src";
+ interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVR &emc>,
+ <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVW &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_XUSB_DEV>;
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBB>,
<&bpmp TEGRA194_POWER_DOMAIN_XUSBA>;
power-domain-names = "dev", "ss";
@@ -977,6 +981,10 @@
"xusb_ss", "xusb_ss_src", "xusb_hs_src",
"xusb_fs_src", "pll_u_480m", "clk_m",
"pll_e";
+ interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTR &emc>,
+ <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTW &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_XUSB_HOST>;
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBC>,
<&bpmp TEGRA194_POWER_DOMAIN_XUSBA>;
@@ -2469,6 +2477,11 @@
* for 8x and 11.025x sample rate streams.
*/
assigned-clock-rates = <258000000>;
+
+ interconnects = <&mc TEGRA194_MEMORY_CLIENT_APEDMAR &emc>,
+ <&mc TEGRA194_MEMORY_CLIENT_APEDMAW &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_APE>;
};
tcu: tcu {
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
index 734c8adeceba..01482d227506 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
@@ -82,10 +82,10 @@
<GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eri", "rxi", "txi",
"bri", "dri", "tei";
- clocks = <&cpg CPG_MOD R9A07G044_CLK_SCIF0>;
+ clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
clock-names = "fck";
power-domains = <&cpg>;
- resets = <&cpg R9A07G044_CLK_SCIF0>;
+ resets = <&cpg R9A07G044_SCIF0_RST_SYSTEM_N>;
status = "disabled";
};
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index a9c0716e7440..a074459f8f2f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -47,7 +47,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN (128)
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 99ad77df8f52..97ddc6c203b7 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -10,6 +10,7 @@
#include <linux/cpumask.h>
+#include <asm/smp.h>
#include <asm/types.h>
struct mpidr_hash {
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index cce308586fcc..3f1490bfb938 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -17,7 +17,7 @@ CFLAGS_syscall.o += -fno-stack-protector
# It's not safe to invoke KCOV when portions of the kernel environment aren't
# available or are out-of-sync with HW state. Since `noinstr` doesn't always
# inhibit KCOV instrumentation, disable it for the entire compilation unit.
-KCOV_INSTRUMENT_entry.o := n
+KCOV_INSTRUMENT_entry-common.o := n
KCOV_INSTRUMENT_idle.o := n
# Object file lists.
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 125d5c9471ac..0ead8bfedf20 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -81,6 +81,7 @@
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
+#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 12ce14a98b7c..db8b2e2d02c2 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -604,7 +604,7 @@ asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
__el0_fiq_handler_common(regs);
}
-static void __el0_error_handler_common(struct pt_regs *regs)
+static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 69b3fde8759e..36f51b0e438a 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -193,18 +193,6 @@ void mte_check_tfsr_el1(void)
}
#endif
-static void update_gcr_el1_excl(u64 excl)
-{
-
- /*
- * Note that the mask controlled by the user via prctl() is an
- * include while GCR_EL1 accepts an exclude mask.
- * No need for ISB since this only affects EL0 currently, implicit
- * with ERET.
- */
- sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
-}
-
static void set_gcr_el1_excl(u64 excl)
{
current->thread.gcr_user_excl = excl;
@@ -265,7 +253,8 @@ void mte_suspend_exit(void)
if (!system_supports_mte())
return;
- update_gcr_el1_excl(gcr_kernel_excl);
+ sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl);
+ isb();
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 95cd62d67371..2cf999e41d30 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -29,7 +29,7 @@
.endm
.macro ldrh1 reg, ptr, val
- user_ldst 9998f, ldtrh, \reg, \ptr, \val
+ user_ldst 9997f, ldtrh, \reg, \ptr, \val
.endm
.macro strh1 reg, ptr, val
@@ -37,7 +37,7 @@
.endm
.macro ldr1 reg, ptr, val
- user_ldst 9998f, ldtr, \reg, \ptr, \val
+ user_ldst 9997f, ldtr, \reg, \ptr, \val
.endm
.macro str1 reg, ptr, val
@@ -45,7 +45,7 @@
.endm
.macro ldp1 reg1, reg2, ptr, val
- user_ldp 9998f, \reg1, \reg2, \ptr, \val
+ user_ldp 9997f, \reg1, \reg2, \ptr, \val
.endm
.macro stp1 reg1, reg2, ptr, val
@@ -53,8 +53,10 @@
.endm
end .req x5
+srcin .req x15
SYM_FUNC_START(__arch_copy_from_user)
add end, x0, x2
+ mov srcin, x1
#include "copy_template.S"
mov x0, #0 // Nothing to copy
ret
@@ -63,6 +65,11 @@ EXPORT_SYMBOL(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+ strb tmp1w, [dst], #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 1f61cd0df062..dbea3799c3ef 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -30,33 +30,34 @@
.endm
.macro ldrh1 reg, ptr, val
- user_ldst 9998f, ldtrh, \reg, \ptr, \val
+ user_ldst 9997f, ldtrh, \reg, \ptr, \val
.endm
.macro strh1 reg, ptr, val
- user_ldst 9998f, sttrh, \reg, \ptr, \val
+ user_ldst 9997f, sttrh, \reg, \ptr, \val
.endm
.macro ldr1 reg, ptr, val
- user_ldst 9998f, ldtr, \reg, \ptr, \val
+ user_ldst 9997f, ldtr, \reg, \ptr, \val
.endm
.macro str1 reg, ptr, val
- user_ldst 9998f, sttr, \reg, \ptr, \val
+ user_ldst 9997f, sttr, \reg, \ptr, \val
.endm
.macro ldp1 reg1, reg2, ptr, val
- user_ldp 9998f, \reg1, \reg2, \ptr, \val
+ user_ldp 9997f, \reg1, \reg2, \ptr, \val
.endm
.macro stp1 reg1, reg2, ptr, val
- user_stp 9998f, \reg1, \reg2, \ptr, \val
+ user_stp 9997f, \reg1, \reg2, \ptr, \val
.endm
end .req x5
-
+srcin .req x15
SYM_FUNC_START(__arch_copy_in_user)
add end, x0, x2
+ mov srcin, x1
#include "copy_template.S"
mov x0, #0
ret
@@ -65,6 +66,12 @@ EXPORT_SYMBOL(__arch_copy_in_user)
.section .fixup,"ax"
.align 2
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+USER(9998f, sttrb tmp1w, [dst])
+ add dst, dst, #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 043da90f5dd7..9f380eecf653 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -32,7 +32,7 @@
.endm
.macro strh1 reg, ptr, val
- user_ldst 9998f, sttrh, \reg, \ptr, \val
+ user_ldst 9997f, sttrh, \reg, \ptr, \val
.endm
.macro ldr1 reg, ptr, val
@@ -40,7 +40,7 @@
.endm
.macro str1 reg, ptr, val
- user_ldst 9998f, sttr, \reg, \ptr, \val
+ user_ldst 9997f, sttr, \reg, \ptr, \val
.endm
.macro ldp1 reg1, reg2, ptr, val
@@ -48,12 +48,14 @@
.endm
.macro stp1 reg1, reg2, ptr, val
- user_stp 9998f, \reg1, \reg2, \ptr, \val
+ user_stp 9997f, \reg1, \reg2, \ptr, \val
.endm
end .req x5
+srcin .req x15
SYM_FUNC_START(__arch_copy_to_user)
add end, x0, x2
+ mov srcin, x1
#include "copy_template.S"
mov x0, #0
ret
@@ -62,6 +64,12 @@ EXPORT_SYMBOL(__arch_copy_to_user)
.section .fixup,"ax"
.align 2
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+ ldrb tmp1w, [srcin]
+USER(9998f, sttrb tmp1w, [dst])
+ add dst, dst, #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 35fbdb7d6e1a..1648790e91b3 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/mte-def.h>
/* Assumptions:
*
@@ -42,7 +43,16 @@
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
+/*
+ * When KASAN_HW_TAGS is in use, memory is checked at MTE_GRANULE_SIZE
+ * (16-byte) granularity, and we must ensure that no access straddles this
+ * alignment boundary.
+ */
+#ifdef CONFIG_KASAN_HW_TAGS
+#define MIN_PAGE_SIZE MTE_GRANULE_SIZE
+#else
#define MIN_PAGE_SIZE 4096
+#endif
/* Since strings are short on average, we check the first 16 bytes
of the string for a NUL character. In order to do an unaligned ldp
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 08f9dd6903b7..86310d6e1035 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -76,7 +76,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
/* we only have a 32-bit FPU */
return SIGFPE;
#endif
- fallthrough;
+ /* fallthrough */
case FPU_32BIT:
if (cpu_has_fre) {
/* clear FRE */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index cd4afcdf3725..9adad24c2e65 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1383,6 +1383,7 @@ static void build_r4000_tlb_refill_handler(void)
switch (boot_cpu_type()) {
default:
if (sizeof(long) == 4) {
+ fallthrough;
case CPU_LOONGSON2EF:
/* Loongson2 ebase is different than r4k, we have more space */
if ((p - tlb_handler) > 64)
@@ -2169,6 +2170,7 @@ static void build_r4000_tlb_load_handler(void)
default:
if (cpu_has_mips_r2_exec_hazard) {
uasm_i_ehb(&p);
+ fallthrough;
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index bdfea6d6ab69..3256a316e884 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -146,6 +146,7 @@ static inline void psurge_clr_ipi(int cpu)
switch(psurge_type) {
case PSURGE_DUAL:
out_8(psurge_sec_intr, ~0);
+ break;
case PSURGE_NONE:
break;
default:
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index bbf8622bbf5d..bd3ef121c379 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -126,6 +126,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
case DIE_SSTEP:
if (uprobe_post_sstep_notifier(regs))
return NOTIFY_STOP;
+ break;
default:
break;
}
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index c42613cfb5ba..739be5da3bca 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -765,7 +765,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
edx.split.bit_width_fixed = cap.bit_width_fixed;
- edx.split.anythread_deprecated = 1;
+ if (cap.version)
+ edx.split.anythread_deprecated = 1;
edx.split.reserved1 = 0;
edx.split.reserved2 = 0;
@@ -940,8 +941,21 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
unsigned phys_as = entry->eax & 0xff;
- if (!g_phys_as)
+ /*
+ * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
+ * the guest operates in the same PA space as the host, i.e.
+ * reductions in MAXPHYADDR for memory encryption affect shadow
+ * paging, too.
+ *
+ * If TDP is enabled but an explicit guest MAXPHYADDR is not
+ * provided, use the raw bare metal MAXPHYADDR as reductions to
+ * the HPAs do not affect GPAs.
+ */
+ if (!tdp_enabled)
+ g_phys_as = boot_cpu_data.x86_phys_bits;
+ else if (!g_phys_as)
g_phys_as = phys_as;
+
entry->eax = g_phys_as | (virt_as << 8);
entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0008_EBX);
@@ -964,12 +978,18 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
case 0x8000001a:
case 0x8000001e:
break;
- /* Support memory encryption cpuid if host supports it */
case 0x8000001F:
- if (!kvm_cpu_cap_has(X86_FEATURE_SEV))
+ if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
- else
+ } else {
cpuid_entry_override(entry, CPUID_8000_001F_EAX);
+
+ /*
+ * Enumerate '0' for "PA bits reduction"; the adjusted
+ * MAXPHYADDR is enumerated directly (see 0x80000008).
+ */
+ entry->ebx &= ~GENMASK(11, 6);
+ }
break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 845d114ae075..66f7f5bc3482 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -53,6 +53,8 @@
#include <asm/kvm_page_track.h>
#include "trace.h"
+#include "paging.h"
+
extern bool itlb_multihit_kvm_mitigation;
int __read_mostly nx_huge_pages = -1;
diff --git a/arch/x86/kvm/mmu/paging.h b/arch/x86/kvm/mmu/paging.h
new file mode 100644
index 000000000000..de8ab323bb70
--- /dev/null
+++ b/arch/x86/kvm/mmu/paging.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Shadow paging constants/helpers that don't need to be #undef'd. */
+#ifndef __KVM_X86_PAGING_H
+#define __KVM_X86_PAGING_H
+
+#define GUEST_PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+#define PT64_LVL_ADDR_MASK(level) \
+ (GUEST_PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
+ * PT64_LEVEL_BITS))) - 1))
+#define PT64_LVL_OFFSET_MASK(level) \
+ (GUEST_PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
+ * PT64_LEVEL_BITS))) - 1))
+#endif /* __KVM_X86_PAGING_H */
+
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 490a028ddabe..ee044d357b5f 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -24,7 +24,7 @@
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
- #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+ #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
@@ -57,7 +57,7 @@
#define pt_element_t u64
#define guest_walker guest_walkerEPT
#define FNAME(name) ept_##name
- #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+ #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 7a5ce9314107..eb7b227fc6cf 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -38,12 +38,6 @@ static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif
-#define PT64_LVL_ADDR_MASK(level) \
- (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
- * PT64_LEVEL_BITS))) - 1))
-#define PT64_LVL_OFFSET_MASK(level) \
- (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
- * PT64_LEVEL_BITS))) - 1))
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
| shadow_x_mask | shadow_nx_mask | shadow_me_mask)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 21d03e3a5dfd..3bd09c50c98b 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -154,6 +154,10 @@ void recalc_intercepts(struct vcpu_svm *svm)
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] |= g->intercepts[i];
+
+ /* If SMI is not intercepted, ignore guest SMI intercept as well */
+ if (!intercept_smi)
+ vmcb_clr_intercept(c, INTERCEPT_SMI);
}
static void copy_vmcb_control_area(struct vmcb_control_area *dst,
@@ -304,8 +308,8 @@ static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
return true;
}
-static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
- struct vmcb_control_area *control)
+void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
+ struct vmcb_control_area *control)
{
copy_vmcb_control_area(&svm->nested.ctl, control);
@@ -618,6 +622,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
struct kvm_host_map map;
u64 vmcb12_gpa;
+ if (!svm->nested.hsave_msr) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
if (is_smm(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
@@ -692,6 +701,27 @@ out:
return ret;
}
+/* Copy state save area fields which are handled by VMRUN */
+void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
+ struct vmcb_save_area *to_save)
+{
+ to_save->es = from_save->es;
+ to_save->cs = from_save->cs;
+ to_save->ss = from_save->ss;
+ to_save->ds = from_save->ds;
+ to_save->gdtr = from_save->gdtr;
+ to_save->idtr = from_save->idtr;
+ to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
+ to_save->efer = from_save->efer;
+ to_save->cr0 = from_save->cr0;
+ to_save->cr3 = from_save->cr3;
+ to_save->cr4 = from_save->cr4;
+ to_save->rax = from_save->rax;
+ to_save->rsp = from_save->rsp;
+ to_save->rip = from_save->rip;
+ to_save->cpl = 0;
+}
+
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
to_vmcb->save.fs = from_vmcb->save.fs;
@@ -1355,28 +1385,11 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
- svm->vmcb01.ptr->save.es = save->es;
- svm->vmcb01.ptr->save.cs = save->cs;
- svm->vmcb01.ptr->save.ss = save->ss;
- svm->vmcb01.ptr->save.ds = save->ds;
- svm->vmcb01.ptr->save.gdtr = save->gdtr;
- svm->vmcb01.ptr->save.idtr = save->idtr;
- svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED;
- svm->vmcb01.ptr->save.efer = save->efer;
- svm->vmcb01.ptr->save.cr0 = save->cr0;
- svm->vmcb01.ptr->save.cr3 = save->cr3;
- svm->vmcb01.ptr->save.cr4 = save->cr4;
- svm->vmcb01.ptr->save.rax = save->rax;
- svm->vmcb01.ptr->save.rsp = save->rsp;
- svm->vmcb01.ptr->save.rip = save->rip;
- svm->vmcb01.ptr->save.cpl = 0;
-
+ svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save);
nested_load_control_from_vmcb12(svm, ctl);
svm_switch_vmcb(svm, &svm->nested.vmcb02);
-
nested_vmcb02_prepare_control(svm);
-
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
ret = 0;
out_free:
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 62926f1a5f7b..6710d9ee2e4b 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1272,8 +1272,8 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Pin guest memory */
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
PAGE_SIZE, &n, 0);
- if (!guest_page)
- return -EFAULT;
+ if (IS_ERR(guest_page))
+ return PTR_ERR(guest_page);
/* allocate memory for header and transport buffer */
ret = -ENOMEM;
@@ -1310,8 +1310,9 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
}
/* Copy packet header to userspace. */
- ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
- params.hdr_len);
+ if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
+ params.hdr_len))
+ ret = -EFAULT;
e_free_trans_data:
kfree(trans_data);
@@ -1463,11 +1464,12 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
data.trans_len = params.trans_len;
/* Pin guest memory */
- ret = -EFAULT;
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
PAGE_SIZE, &n, 0);
- if (!guest_page)
+ if (IS_ERR(guest_page)) {
+ ret = PTR_ERR(guest_page);
goto e_free_trans;
+ }
/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8834822c00cd..664d20f0689c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -198,6 +198,11 @@ module_param(avic, bool, 0444);
bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);
+
+bool intercept_smi = true;
+module_param(intercept_smi, bool, 0444);
+
+
static bool svm_gp_erratum_intercept = true;
static u8 rsm_ins_bytes[] = "\x0f\xaa";
@@ -1185,7 +1190,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm_set_intercept(svm, INTERCEPT_INTR);
svm_set_intercept(svm, INTERCEPT_NMI);
- svm_set_intercept(svm, INTERCEPT_SMI);
+
+ if (intercept_smi)
+ svm_set_intercept(svm, INTERCEPT_SMI);
+
svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
svm_set_intercept(svm, INTERCEPT_RDPMC);
svm_set_intercept(svm, INTERCEPT_CPUID);
@@ -1923,7 +1931,7 @@ static int npf_interception(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
+ u64 fault_address = svm->vmcb->control.exit_info_2;
u64 error_code = svm->vmcb->control.exit_info_1;
trace_kvm_page_fault(fault_address, error_code);
@@ -2106,6 +2114,11 @@ static int nmi_interception(struct kvm_vcpu *vcpu)
return 1;
}
+static int smi_interception(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
static int intr_interception(struct kvm_vcpu *vcpu)
{
++vcpu->stat.irq_exits;
@@ -2941,7 +2954,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm_disable_lbrv(vcpu);
break;
case MSR_VM_HSAVE_PA:
- svm->nested.hsave_msr = data;
+ /*
+ * Old kernels did not validate the value written to
+ * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid
+ * value so that buggy or malicious guests originating from
+ * those kernels can still be live migrated.
+ */
+ if (!msr->host_initiated && !page_address_valid(vcpu, data))
+ return 1;
+
+ svm->nested.hsave_msr = data & PAGE_MASK;
break;
case MSR_VM_CR:
return svm_set_vm_cr(vcpu, data);
@@ -3080,8 +3102,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
[SVM_EXIT_INTR] = intr_interception,
[SVM_EXIT_NMI] = nmi_interception,
- [SVM_EXIT_SMI] = kvm_emulate_as_nop,
- [SVM_EXIT_INIT] = kvm_emulate_as_nop,
+ [SVM_EXIT_SMI] = smi_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
[SVM_EXIT_RDPMC] = kvm_emulate_rdpmc,
[SVM_EXIT_CPUID] = kvm_emulate_cpuid,
@@ -4288,6 +4309,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_host_map map_save;
int ret;
if (is_guest_mode(vcpu)) {
@@ -4303,6 +4325,29 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
ret = nested_svm_vmexit(svm);
if (ret)
return ret;
+
+ /*
+ * KVM uses VMCB01 to store L1 host state while L2 runs but
+ * VMCB01 is going to be used during SMM and thus the state will
+ * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host
+ * save area pointed to by MSR_VM_HSAVE_PA. The APM guarantees that
+ * the format of the area is identical to the guest save area,
+ * offset by 0x400 (matches the offset of 'struct vmcb_save_area'
+ * within 'struct vmcb'). Note: HSAVE area may also be used by
+ * L1 hypervisor to save additional host context (e.g. KVM does
+ * that, see svm_prepare_guest_switch()) which must be
+ * preserved.
+ */
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+ &map_save) == -EINVAL)
+ return 1;
+
+ BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+
+ svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
+ map_save.hva + 0x400);
+
+ kvm_vcpu_unmap(vcpu, &map_save, true);
}
return 0;
}
@@ -4310,13 +4355,14 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct kvm_host_map map;
+ struct kvm_host_map map, map_save;
int ret = 0;
if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+ struct vmcb *vmcb12;
if (guest) {
if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
@@ -4332,8 +4378,25 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
if (svm_allocate_nested(svm))
return 1;
- ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, map.hva);
+ vmcb12 = map.hva;
+
+ nested_load_control_from_vmcb12(svm, &vmcb12->control);
+
+ ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
kvm_vcpu_unmap(vcpu, &map, true);
+
+ /*
+ * Restore L1 host state from L1 HSAVE area as VMCB01 was
+ * used during SMM (see svm_enter_smm())
+ */
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+ &map_save) == -EINVAL)
+ return 1;
+
+ svm_copy_vmrun_state(map_save.hva + 0x400,
+ &svm->vmcb01.ptr->save);
+
+ kvm_vcpu_unmap(vcpu, &map_save, true);
}
}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f89b623bb591..7e2090752d8f 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -31,6 +31,7 @@
#define MSRPM_OFFSETS 16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
+extern bool intercept_smi;
/*
* Clean bits in VMCB.
@@ -463,6 +464,8 @@ void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
+void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
+ struct vmcb_save_area *to_save);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
@@ -479,6 +482,8 @@ int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
+void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
+ struct vmcb_control_area *control);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 3979a947933a..db88ed4f2121 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -14,8 +14,6 @@
#include "vmx_ops.h"
#include "cpuid.h"
-extern const u32 vmx_msr_index[];
-
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
#define MSR_TYPE_RW 3
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c6dc1b445231..a4fd10604f72 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9601,6 +9601,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[3], 3);
set_debugreg(vcpu->arch.dr6, 6);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
+ } else if (unlikely(hw_breakpoint_active())) {
+ set_debugreg(0, 7);
}
for (;;) {
@@ -10985,9 +10987,6 @@ int kvm_arch_hardware_setup(void *opaque)
int r;
rdmsrl_safe(MSR_EFER, &host_efer);
- if (WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_NX) &&
- !(host_efer & EFER_NX)))
- return -EIO;
if (boot_cpu_has(X86_FEATURE_XSAVES))
rdmsrl(MSR_IA32_XSS, host_xss);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e835164189f1..4b951458c9fc 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -570,6 +570,9 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
for (i = 0; i < prog->aux->size_poke_tab; i++) {
poke = &prog->aux->poke_tab[i];
+ if (poke->aux && poke->aux != prog->aux)
+ continue;
+
WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
diff --git a/block/Kconfig b/block/Kconfig
index fd732aede922..97c1d999b920 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -29,35 +29,15 @@ if BLOCK
config BLK_RQ_ALLOC_TIME
bool
-config BLK_SCSI_REQUEST
- bool
-
config BLK_CGROUP_RWSTAT
bool
-config BLK_DEV_BSG
- bool "Block layer SG support v4"
- default y
- select BLK_SCSI_REQUEST
- help
- Saying Y here will enable generic SG (SCSI generic) v4 support
- for any block device.
-
- Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
- can handle complicated SCSI commands: tagged variable length cdbs
- with bidirectional data transfers and generic request/response
- protocols (e.g. Task Management Functions and SMP in Serial
- Attached SCSI).
-
- This option is required by recent UDEV versions to properly
- access device serial numbers, etc.
-
- If unsure, say Y.
+config BLK_DEV_BSG_COMMON
+ tristate
config BLK_DEV_BSGLIB
bool "Block layer SG support v4 helper lib"
- select BLK_DEV_BSG
- select BLK_SCSI_REQUEST
+ select BLK_DEV_BSG_COMMON
help
Subsystems will normally enable this if needed. Users will not
normally need to manually enable this.
diff --git a/block/Makefile b/block/Makefile
index bfbe4e13ca1e..640afba070fd 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -12,8 +12,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \
disk-events.o
obj-$(CONFIG_BOUNCE) += bounce.o
-obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
-obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
+obj-$(CONFIG_BLK_DEV_BSG_COMMON) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
obj-$(CONFIG_BLK_CGROUP_RWSTAT) += blk-cgroup-rwstat.o
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2c4ac51e54eb..495f508c6300 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3298,8 +3298,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
set->map[HCTX_TYPE_POLL].nr_queues)
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
- q->sg_reserved_size = INT_MAX;
-
INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
INIT_LIST_HEAD(&q->requeue_list);
spin_lock_init(&q->requeue_lock);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index a89d80102304..ccb98276c964 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -6,6 +6,7 @@
* Copyright (C) 2011 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011 Mike Christie
*/
+#include <linux/bsg.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
@@ -19,36 +20,44 @@
struct bsg_set {
struct blk_mq_tag_set tag_set;
+ struct bsg_device *bd;
bsg_job_fn *job_fn;
bsg_timeout_fn *timeout_fn;
};
-static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
+static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+ fmode_t mode, unsigned int timeout)
{
+ struct bsg_job *job;
+ struct request *rq;
+ struct bio *bio;
+ int ret;
+
if (hdr->protocol != BSG_PROTOCOL_SCSI ||
hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- return 0;
-}
-static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
- fmode_t mode)
-{
- struct bsg_job *job = blk_mq_rq_to_pdu(rq);
- int ret;
+ rq = blk_get_request(q, hdr->dout_xfer_len ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ rq->timeout = timeout;
+ job = blk_mq_rq_to_pdu(rq);
job->request_len = hdr->request_len;
job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
- if (IS_ERR(job->request))
- return PTR_ERR(job->request);
+ if (IS_ERR(job->request)) {
+ ret = PTR_ERR(job->request);
+ goto out_put_request;
+ }
if (hdr->dout_xfer_len && hdr->din_xfer_len) {
job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
if (IS_ERR(job->bidi_rq)) {
ret = PTR_ERR(job->bidi_rq);
- goto out;
+ goto out_free_job_request;
}
ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
@@ -63,20 +72,20 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
job->bidi_bio = NULL;
}
- return 0;
+ ret = 0;
+ if (hdr->dout_xfer_len) {
+ ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+ hdr->dout_xfer_len, GFP_KERNEL);
+ } else if (hdr->din_xfer_len) {
+ ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+ hdr->din_xfer_len, GFP_KERNEL);
+ }
-out_free_bidi_rq:
- if (job->bidi_rq)
- blk_put_request(job->bidi_rq);
-out:
- kfree(job->request);
- return ret;
-}
+ if (ret)
+ goto out_unmap_bidi_rq;
-static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
-{
- struct bsg_job *job = blk_mq_rq_to_pdu(rq);
- int ret = 0;
+ bio = rq->bio;
+ blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
/*
* The assignments below don't make much sense, but are kept for
@@ -119,28 +128,20 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
hdr->din_resid = 0;
}
- return ret;
-}
-
-static void bsg_transport_free_rq(struct request *rq)
-{
- struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-
- if (job->bidi_rq) {
+ blk_rq_unmap_user(bio);
+out_unmap_bidi_rq:
+ if (job->bidi_rq)
blk_rq_unmap_user(job->bidi_bio);
+out_free_bidi_rq:
+ if (job->bidi_rq)
blk_put_request(job->bidi_rq);
- }
-
+out_free_job_request:
kfree(job->request);
+out_put_request:
+ blk_put_request(rq);
+ return ret;
}
-static const struct bsg_ops bsg_transport_ops = {
- .check_proto = bsg_transport_check_proto,
- .fill_hdr = bsg_transport_fill_hdr,
- .complete_rq = bsg_transport_complete_rq,
- .free_rq = bsg_transport_free_rq,
-};
-
/**
* bsg_teardown_job - routine to teardown a bsg job
* @kref: kref inside bsg_job that is to be torn down
@@ -327,7 +328,7 @@ void bsg_remove_queue(struct request_queue *q)
struct bsg_set *bset =
container_of(q->tag_set, struct bsg_set, tag_set);
- bsg_unregister_queue(q);
+ bsg_unregister_queue(bset->bd);
blk_cleanup_queue(q);
blk_mq_free_tag_set(&bset->tag_set);
kfree(bset);
@@ -396,10 +397,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
q->queuedata = dev;
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
- ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
- if (ret) {
- printk(KERN_ERR "%s: bsg interface failed to "
- "initialize - register queue\n", dev->kobj.name);
+ bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
+ if (IS_ERR(bset->bd)) {
+ ret = PTR_ERR(bset->bd);
goto out_cleanup_queue;
}
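With bsg_register_queue() now returning the bsg_device and taking a bsg_sg_io_fn callback instead of a bsg_ops table, a transport attaches itself roughly as below. This is a sketch under the signatures visible in this patch; my_sg_io(), my_attach(), my_detach() and my_bd are hypothetical names:

#include <linux/bsg.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_sg_io(struct request_queue *q, struct sg_io_v4 *hdr,
		    fmode_t mode, unsigned int timeout)
{
	/* allocate, map and execute the request, then fill in *hdr */
	return -EOPNOTSUPP;	/* placeholder body */
}

static struct bsg_device *my_bd;

static int my_attach(struct device *dev, struct request_queue *q)
{
	my_bd = bsg_register_queue(q, dev, dev_name(dev), my_sg_io);
	return PTR_ERR_OR_ZERO(my_bd);
}

static void my_detach(void)
{
	bsg_unregister_queue(my_bd);	/* also frees the bsg_device */
}
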
diff --git a/block/bsg.c b/block/bsg.c
index 1f196563ae6c..351095193788 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -15,343 +15,97 @@
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_driver.h>
#include <scsi/sg.h>
#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
#define BSG_VERSION "0.4"
-#define bsg_dbg(bd, fmt, ...) \
- pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)
-
struct bsg_device {
struct request_queue *queue;
- spinlock_t lock;
- struct hlist_node dev_list;
- refcount_t ref_count;
- char name[20];
+ struct device device;
+ struct cdev cdev;
int max_queue;
+ unsigned int timeout;
+ unsigned int reserved_size;
+ bsg_sg_io_fn *sg_io_fn;
};
+static inline struct bsg_device *to_bsg_device(struct inode *inode)
+{
+ return container_of(inode->i_cdev, struct bsg_device, cdev);
+}
+
#define BSG_DEFAULT_CMDS 64
#define BSG_MAX_DEVS 32768
-static DEFINE_MUTEX(bsg_mutex);
-static DEFINE_IDR(bsg_minor_idr);
-
-#define BSG_LIST_ARRAY_SIZE 8
-static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
-
+static DEFINE_IDA(bsg_minor_ida);
static struct class *bsg_class;
static int bsg_major;
-static inline struct hlist_head *bsg_dev_idx_hash(int index)
-{
- return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
-}
-
-#define uptr64(val) ((void __user *)(uintptr_t)(val))
-
-static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
-{
- if (hdr->protocol != BSG_PROTOCOL_SCSI ||
- hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
- return -EINVAL;
- return 0;
-}
-
-static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
- fmode_t mode)
-{
- struct scsi_request *sreq = scsi_req(rq);
-
- if (hdr->dout_xfer_len && hdr->din_xfer_len) {
- pr_warn_once("BIDI support in bsg has been removed.\n");
- return -EOPNOTSUPP;
- }
-
- sreq->cmd_len = hdr->request_len;
- if (sreq->cmd_len > BLK_MAX_CDB) {
- sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
- if (!sreq->cmd)
- return -ENOMEM;
- }
-
- if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
- return -EFAULT;
- if (blk_verify_command(sreq->cmd, mode))
- return -EPERM;
- return 0;
-}
-
-static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
+static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
- struct scsi_request *sreq = scsi_req(rq);
- int ret = 0;
-
- /*
- * fill in all the output members
- */
- hdr->device_status = sreq->result & 0xff;
- hdr->transport_status = host_byte(sreq->result);
- hdr->driver_status = 0;
- if (scsi_status_is_check_condition(sreq->result))
- hdr->driver_status = DRIVER_SENSE;
- hdr->info = 0;
- if (hdr->device_status || hdr->transport_status || hdr->driver_status)
- hdr->info |= SG_INFO_CHECK;
- hdr->response_len = 0;
-
- if (sreq->sense_len && hdr->response) {
- int len = min_t(unsigned int, hdr->max_response_len,
- sreq->sense_len);
-
- if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
- ret = -EFAULT;
- else
- hdr->response_len = len;
- }
+ unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT;
- if (rq_data_dir(rq) == READ)
- hdr->din_resid = sreq->resid_len;
- else
- hdr->dout_resid = sreq->resid_len;
+ if (hdr->timeout)
+ timeout = msecs_to_jiffies(hdr->timeout);
+ else if (bd->timeout)
+ timeout = bd->timeout;
- return ret;
+ return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
}
-static void bsg_scsi_free_rq(struct request *rq)
+static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
{
- scsi_req_free_cmd(scsi_req(rq));
-}
-
-static const struct bsg_ops bsg_scsi_ops = {
- .check_proto = bsg_scsi_check_proto,
- .fill_hdr = bsg_scsi_fill_hdr,
- .complete_rq = bsg_scsi_complete_rq,
- .free_rq = bsg_scsi_free_rq,
-};
-
-static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
-{
- struct request *rq;
- struct bio *bio;
struct sg_io_v4 hdr;
int ret;
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return -EFAULT;
-
- if (!q->bsg_dev.class_dev)
- return -ENXIO;
-
if (hdr.guard != 'Q')
return -EINVAL;
- ret = q->bsg_dev.ops->check_proto(&hdr);
- if (ret)
- return ret;
-
- rq = blk_get_request(q, hdr.dout_xfer_len ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
- if (ret) {
- blk_put_request(rq);
- return ret;
- }
-
- rq->timeout = msecs_to_jiffies(hdr.timeout);
- if (!rq->timeout)
- rq->timeout = q->sg_timeout;
- if (!rq->timeout)
- rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
- if (rq->timeout < BLK_MIN_SG_TIMEOUT)
- rq->timeout = BLK_MIN_SG_TIMEOUT;
-
- if (hdr.dout_xfer_len) {
- ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
- hdr.dout_xfer_len, GFP_KERNEL);
- } else if (hdr.din_xfer_len) {
- ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
- hdr.din_xfer_len, GFP_KERNEL);
- }
-
- if (ret)
- goto out_free_rq;
-
- bio = rq->bio;
-
- blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
- ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
- blk_rq_unmap_user(bio);
-
-out_free_rq:
- rq->q->bsg_dev.ops->free_rq(rq);
- blk_put_request(rq);
+ ret = bd->sg_io_fn(bd->queue, &hdr, mode, bsg_timeout(bd, &hdr));
if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
return -EFAULT;
return ret;
}
-static struct bsg_device *bsg_alloc_device(void)
-{
- struct bsg_device *bd;
-
- bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
- if (unlikely(!bd))
- return NULL;
-
- spin_lock_init(&bd->lock);
- bd->max_queue = BSG_DEFAULT_CMDS;
- INIT_HLIST_NODE(&bd->dev_list);
- return bd;
-}
-
-static int bsg_put_device(struct bsg_device *bd)
-{
- struct request_queue *q = bd->queue;
-
- mutex_lock(&bsg_mutex);
-
- if (!refcount_dec_and_test(&bd->ref_count)) {
- mutex_unlock(&bsg_mutex);
- return 0;
- }
-
- hlist_del(&bd->dev_list);
- mutex_unlock(&bsg_mutex);
-
- bsg_dbg(bd, "tearing down\n");
-
- /*
- * close can always block
- */
- kfree(bd);
- blk_put_queue(q);
- return 0;
-}
-
-static struct bsg_device *bsg_add_device(struct inode *inode,
- struct request_queue *rq,
- struct file *file)
-{
- struct bsg_device *bd;
- unsigned char buf[32];
-
- lockdep_assert_held(&bsg_mutex);
-
- if (!blk_get_queue(rq))
- return ERR_PTR(-ENXIO);
-
- bd = bsg_alloc_device();
- if (!bd) {
- blk_put_queue(rq);
- return ERR_PTR(-ENOMEM);
- }
-
- bd->queue = rq;
-
- refcount_set(&bd->ref_count, 1);
- hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
-
- strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
- bsg_dbg(bd, "bound to <%s>, max queue %d\n",
- format_dev_t(buf, inode->i_rdev), bd->max_queue);
-
- return bd;
-}
-
-static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
-{
- struct bsg_device *bd;
-
- lockdep_assert_held(&bsg_mutex);
-
- hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
- if (bd->queue == q) {
- refcount_inc(&bd->ref_count);
- goto found;
- }
- }
- bd = NULL;
-found:
- return bd;
-}
-
-static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
-{
- struct bsg_device *bd;
- struct bsg_class_device *bcd;
-
- /*
- * find the class device
- */
- mutex_lock(&bsg_mutex);
- bcd = idr_find(&bsg_minor_idr, iminor(inode));
-
- if (!bcd) {
- bd = ERR_PTR(-ENODEV);
- goto out_unlock;
- }
-
- bd = __bsg_get_device(iminor(inode), bcd->queue);
- if (!bd)
- bd = bsg_add_device(inode, bcd->queue, file);
-
-out_unlock:
- mutex_unlock(&bsg_mutex);
- return bd;
-}
-
static int bsg_open(struct inode *inode, struct file *file)
{
- struct bsg_device *bd;
-
- bd = bsg_get_device(inode, file);
-
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- file->private_data = bd;
+ if (!blk_get_queue(to_bsg_device(inode)->queue))
+ return -ENXIO;
return 0;
}
static int bsg_release(struct inode *inode, struct file *file)
{
- struct bsg_device *bd = file->private_data;
-
- file->private_data = NULL;
- return bsg_put_device(bd);
+ blk_put_queue(to_bsg_device(inode)->queue);
+ return 0;
}
static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
{
- return put_user(bd->max_queue, uarg);
+ return put_user(READ_ONCE(bd->max_queue), uarg);
}
static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
{
- int queue;
+ int max_queue;
- if (get_user(queue, uarg))
+ if (get_user(max_queue, uarg))
return -EFAULT;
- if (queue < 1)
+ if (max_queue < 1)
return -EINVAL;
-
- spin_lock_irq(&bd->lock);
- bd->max_queue = queue;
- spin_unlock_irq(&bd->lock);
+ WRITE_ONCE(bd->max_queue, max_queue);
return 0;
}
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct bsg_device *bd = file->private_data;
+ struct bsg_device *bd = to_bsg_device(file_inode(file));
+ struct request_queue *q = bd->queue;
void __user *uarg = (void __user *) arg;
+ int __user *intp = uarg;
+ int val;
switch (cmd) {
/*
@@ -366,17 +120,37 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* SCSI/sg ioctls
*/
case SG_GET_VERSION_NUM:
+ return put_user(30527, intp);
case SCSI_IOCTL_GET_IDLUN:
+ return put_user(0, intp);
case SCSI_IOCTL_GET_BUS_NUMBER:
+ return put_user(0, intp);
case SG_SET_TIMEOUT:
+ if (get_user(val, intp))
+ return -EFAULT;
+ bd->timeout = clock_t_to_jiffies(val);
+ return 0;
case SG_GET_TIMEOUT:
+ return jiffies_to_clock_t(bd->timeout);
case SG_GET_RESERVED_SIZE:
+ return put_user(min(bd->reserved_size, queue_max_bytes(q)),
+ intp);
case SG_SET_RESERVED_SIZE:
+ if (get_user(val, intp))
+ return -EFAULT;
+ if (val < 0)
+ return -EINVAL;
+ bd->reserved_size =
+ min_t(unsigned int, val, queue_max_bytes(q));
+ return 0;
case SG_EMULATED_HOST:
- case SCSI_IOCTL_SEND_COMMAND:
- return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
+ return put_user(1, intp);
case SG_IO:
- return bsg_sg_io(bd->queue, file->f_mode, uarg);
+ return bsg_sg_io(bd, file->f_mode, uarg);
+ case SCSI_IOCTL_SEND_COMMAND:
+ pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
+ current->comm);
+ return -EINVAL;
default:
return -ENOTTY;
}
@@ -391,92 +165,65 @@ static const struct file_operations bsg_fops = {
.llseek = default_llseek,
};
-void bsg_unregister_queue(struct request_queue *q)
+void bsg_unregister_queue(struct bsg_device *bd)
{
- struct bsg_class_device *bcd = &q->bsg_dev;
-
- if (!bcd->class_dev)
- return;
-
- mutex_lock(&bsg_mutex);
- idr_remove(&bsg_minor_idr, bcd->minor);
- if (q->kobj.sd)
- sysfs_remove_link(&q->kobj, "bsg");
- device_unregister(bcd->class_dev);
- bcd->class_dev = NULL;
- mutex_unlock(&bsg_mutex);
+ if (bd->queue->kobj.sd)
+ sysfs_remove_link(&bd->queue->kobj, "bsg");
+ cdev_device_del(&bd->cdev, &bd->device);
+ ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+ kfree(bd);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
-int bsg_register_queue(struct request_queue *q, struct device *parent,
- const char *name, const struct bsg_ops *ops)
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn)
{
- struct bsg_class_device *bcd;
- dev_t dev;
+ struct bsg_device *bd;
int ret;
- struct device *class_dev = NULL;
- /*
- * we need a proper transport to send commands, not a stacked device
- */
- if (!queue_is_mq(q))
- return 0;
-
- bcd = &q->bsg_dev;
- memset(bcd, 0, sizeof(*bcd));
-
- mutex_lock(&bsg_mutex);
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return ERR_PTR(-ENOMEM);
+ bd->max_queue = BSG_DEFAULT_CMDS;
+ bd->reserved_size = INT_MAX;
+ bd->queue = q;
+ bd->sg_io_fn = sg_io_fn;
- ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+ ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
if (ret < 0) {
- if (ret == -ENOSPC) {
- printk(KERN_ERR "bsg: too many bsg devices\n");
- ret = -EINVAL;
- }
- goto unlock;
- }
-
- bcd->minor = ret;
- bcd->queue = q;
- bcd->ops = ops;
- dev = MKDEV(bsg_major, bcd->minor);
- class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
- if (IS_ERR(class_dev)) {
- ret = PTR_ERR(class_dev);
- goto idr_remove;
+ if (ret == -ENOSPC)
+ dev_err(parent, "bsg: too many bsg devices\n");
+ goto out_kfree;
}
- bcd->class_dev = class_dev;
+ bd->device.devt = MKDEV(bsg_major, ret);
+ bd->device.class = bsg_class;
+ bd->device.parent = parent;
+ dev_set_name(&bd->device, "%s", name);
+ device_initialize(&bd->device);
+
+ cdev_init(&bd->cdev, &bsg_fops);
+ bd->cdev.owner = THIS_MODULE;
+ ret = cdev_device_add(&bd->cdev, &bd->device);
+ if (ret)
+ goto out_ida_remove;
if (q->kobj.sd) {
- ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
+ ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
if (ret)
- goto unregister_class_dev;
+ goto out_device_del;
}
- mutex_unlock(&bsg_mutex);
- return 0;
-
-unregister_class_dev:
- device_unregister(class_dev);
-idr_remove:
- idr_remove(&bsg_minor_idr, bcd->minor);
-unlock:
- mutex_unlock(&bsg_mutex);
- return ret;
-}
-
-int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
-{
- if (!blk_queue_scsi_passthrough(q)) {
- WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
- return -EINVAL;
- }
+ return bd;
- return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops);
+out_device_del:
+ cdev_device_del(&bd->cdev, &bd->device);
+out_ida_remove:
+ ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+out_kfree:
+ kfree(bd);
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);
-
-static struct cdev bsg_cdev;
+EXPORT_SYMBOL_GPL(bsg_register_queue);
static char *bsg_devnode(struct device *dev, umode_t *mode)
{
@@ -485,11 +232,8 @@ static char *bsg_devnode(struct device *dev, umode_t *mode)
static int __init bsg_init(void)
{
- int ret, i;
dev_t devid;
-
- for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
- INIT_HLIST_HEAD(&bsg_device_list[i]);
+ int ret;
bsg_class = class_create(THIS_MODULE, "bsg");
if (IS_ERR(bsg_class))
@@ -499,19 +243,12 @@ static int __init bsg_init(void)
ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
if (ret)
goto destroy_bsg_class;
-
bsg_major = MAJOR(devid);
- cdev_init(&bsg_cdev, &bsg_fops);
- ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
- if (ret)
- goto unregister_chrdev;
-
printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
" loaded (major %d)\n", bsg_major);
return 0;
-unregister_chrdev:
- unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+
destroy_bsg_class:
class_destroy(bsg_class);
return ret;
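The rewritten bsg.c drops the minor-indexed hash table and recovers per-device state directly from the character device: struct bsg_device embeds a struct cdev, so to_bsg_device() is just container_of() on inode->i_cdev. A generic sketch of that idiom (my_device and my_open are hypothetical names):

#include <linux/cdev.h>
#include <linux/fs.h>

struct my_device {
	struct cdev cdev;	/* embedded; registered with cdev_device_add() */
	int example_state;
};

static int my_open(struct inode *inode, struct file *file)
{
	struct my_device *md =
		container_of(inode->i_cdev, struct my_device, cdev);

	file->private_data = md;	/* bsg instead derives it per call */
	return 0;
}
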
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
deleted file mode 100644
index d247431a6853..000000000000
--- a/block/scsi_ioctl.c
+++ /dev/null
@@ -1,890 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- */
-#include <linux/compat.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/capability.h>
-#include <linux/completion.h>
-#include <linux/cdrom.h>
-#include <linux/ratelimit.h>
-#include <linux/slab.h>
-#include <linux/times.h>
-#include <linux/uio.h>
-#include <linux/uaccess.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_ioctl.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/sg.h>
-
-struct blk_cmd_filter {
- unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
- unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-};
-
-static struct blk_cmd_filter blk_default_cmd_filter;
-
-/* Command group 3 is reserved and should never be used. */
-const unsigned char scsi_command_size_tbl[8] =
-{
- 6, 10, 10, 12,
- 16, 12, 10, 10
-};
-EXPORT_SYMBOL(scsi_command_size_tbl);
-
-static int sg_get_version(int __user *p)
-{
- static const int sg_version_num = 30527;
- return put_user(sg_version_num, p);
-}
-
-static int scsi_get_idlun(struct request_queue *q, int __user *p)
-{
- return put_user(0, p);
-}
-
-static int scsi_get_bus(struct request_queue *q, int __user *p)
-{
- return put_user(0, p);
-}
-
-static int sg_get_timeout(struct request_queue *q)
-{
- return jiffies_to_clock_t(q->sg_timeout);
-}
-
-static int sg_set_timeout(struct request_queue *q, int __user *p)
-{
- int timeout, err = get_user(timeout, p);
-
- if (!err)
- q->sg_timeout = clock_t_to_jiffies(timeout);
-
- return err;
-}
-
-static int max_sectors_bytes(struct request_queue *q)
-{
- unsigned int max_sectors = queue_max_sectors(q);
-
- max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
-
- return max_sectors << 9;
-}
-
-static int sg_get_reserved_size(struct request_queue *q, int __user *p)
-{
- int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q));
-
- return put_user(val, p);
-}
-
-static int sg_set_reserved_size(struct request_queue *q, int __user *p)
-{
- int size, err = get_user(size, p);
-
- if (err)
- return err;
-
- if (size < 0)
- return -EINVAL;
-
- q->sg_reserved_size = min(size, max_sectors_bytes(q));
- return 0;
-}
-
-/*
- * will always return that we are ATAPI even for a real SCSI drive, I'm not
- * so sure this is worth doing anything about (why would you care??)
- */
-static int sg_emulated_host(struct request_queue *q, int __user *p)
-{
- return put_user(1, p);
-}
-
-static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
-{
- /* Basic read-only commands */
- __set_bit(TEST_UNIT_READY, filter->read_ok);
- __set_bit(REQUEST_SENSE, filter->read_ok);
- __set_bit(READ_6, filter->read_ok);
- __set_bit(READ_10, filter->read_ok);
- __set_bit(READ_12, filter->read_ok);
- __set_bit(READ_16, filter->read_ok);
- __set_bit(READ_BUFFER, filter->read_ok);
- __set_bit(READ_DEFECT_DATA, filter->read_ok);
- __set_bit(READ_CAPACITY, filter->read_ok);
- __set_bit(READ_LONG, filter->read_ok);
- __set_bit(INQUIRY, filter->read_ok);
- __set_bit(MODE_SENSE, filter->read_ok);
- __set_bit(MODE_SENSE_10, filter->read_ok);
- __set_bit(LOG_SENSE, filter->read_ok);
- __set_bit(START_STOP, filter->read_ok);
- __set_bit(GPCMD_VERIFY_10, filter->read_ok);
- __set_bit(VERIFY_16, filter->read_ok);
- __set_bit(REPORT_LUNS, filter->read_ok);
- __set_bit(SERVICE_ACTION_IN_16, filter->read_ok);
- __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
- __set_bit(MAINTENANCE_IN, filter->read_ok);
- __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
-
- /* Audio CD commands */
- __set_bit(GPCMD_PLAY_CD, filter->read_ok);
- __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
- __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
- __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
- __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);
-
- /* CD/DVD data reading */
- __set_bit(GPCMD_READ_CD, filter->read_ok);
- __set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
- __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
- __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
- __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
- __set_bit(GPCMD_READ_HEADER, filter->read_ok);
- __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
- __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
- __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
- __set_bit(GPCMD_REPORT_KEY, filter->read_ok);
- __set_bit(GPCMD_SCAN, filter->read_ok);
- __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
- __set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
- __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
- __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
- __set_bit(GPCMD_SEEK, filter->read_ok);
- __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);
-
- /* Basic writing commands */
- __set_bit(WRITE_6, filter->write_ok);
- __set_bit(WRITE_10, filter->write_ok);
- __set_bit(WRITE_VERIFY, filter->write_ok);
- __set_bit(WRITE_12, filter->write_ok);
- __set_bit(WRITE_VERIFY_12, filter->write_ok);
- __set_bit(WRITE_16, filter->write_ok);
- __set_bit(WRITE_LONG, filter->write_ok);
- __set_bit(WRITE_LONG_2, filter->write_ok);
- __set_bit(WRITE_SAME, filter->write_ok);
- __set_bit(WRITE_SAME_16, filter->write_ok);
- __set_bit(WRITE_SAME_32, filter->write_ok);
- __set_bit(ERASE, filter->write_ok);
- __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
- __set_bit(MODE_SELECT, filter->write_ok);
- __set_bit(LOG_SELECT, filter->write_ok);
- __set_bit(GPCMD_BLANK, filter->write_ok);
- __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
- __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
- __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
- __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
- __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
- __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
- __set_bit(GPCMD_SEND_EVENT, filter->write_ok);
- __set_bit(GPCMD_SEND_KEY, filter->write_ok);
- __set_bit(GPCMD_SEND_OPC, filter->write_ok);
- __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
- __set_bit(GPCMD_SET_SPEED, filter->write_ok);
- __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
- __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
- __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
- __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
-
- /* ZBC Commands */
- __set_bit(ZBC_OUT, filter->write_ok);
- __set_bit(ZBC_IN, filter->read_ok);
-}
-
-int blk_verify_command(unsigned char *cmd, fmode_t mode)
-{
- struct blk_cmd_filter *filter = &blk_default_cmd_filter;
-
- /* root can do any command. */
- if (capable(CAP_SYS_RAWIO))
- return 0;
-
- /* Anybody who can open the device can do a read-safe command */
- if (test_bit(cmd[0], filter->read_ok))
- return 0;
-
- /* Write-safe commands require a writable open */
- if (test_bit(cmd[0], filter->write_ok) && (mode & FMODE_WRITE))
- return 0;
-
- return -EPERM;
-}
-EXPORT_SYMBOL(blk_verify_command);
-
-static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
- struct sg_io_hdr *hdr, fmode_t mode)
-{
- struct scsi_request *req = scsi_req(rq);
-
- if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
- return -EFAULT;
- if (blk_verify_command(req->cmd, mode))
- return -EPERM;
-
- /*
- * fill in request structure
- */
- req->cmd_len = hdr->cmd_len;
-
- rq->timeout = msecs_to_jiffies(hdr->timeout);
- if (!rq->timeout)
- rq->timeout = q->sg_timeout;
- if (!rq->timeout)
- rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
- if (rq->timeout < BLK_MIN_SG_TIMEOUT)
- rq->timeout = BLK_MIN_SG_TIMEOUT;
-
- return 0;
-}
-
-static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
- struct bio *bio)
-{
- struct scsi_request *req = scsi_req(rq);
- int r, ret = 0;
-
- /*
- * fill in all the output members
- */
- hdr->status = req->result & 0xff;
- hdr->masked_status = status_byte(req->result);
- hdr->msg_status = COMMAND_COMPLETE;
- hdr->host_status = host_byte(req->result);
- hdr->driver_status = 0;
- if (scsi_status_is_check_condition(hdr->status))
- hdr->driver_status = DRIVER_SENSE;
- hdr->info = 0;
- if (hdr->masked_status || hdr->host_status || hdr->driver_status)
- hdr->info |= SG_INFO_CHECK;
- hdr->resid = req->resid_len;
- hdr->sb_len_wr = 0;
-
- if (req->sense_len && hdr->sbp) {
- int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
-
- if (!copy_to_user(hdr->sbp, req->sense, len))
- hdr->sb_len_wr = len;
- else
- ret = -EFAULT;
- }
-
- r = blk_rq_unmap_user(bio);
- if (!ret)
- ret = r;
-
- return ret;
-}
-
-static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
- struct sg_io_hdr *hdr, fmode_t mode)
-{
- unsigned long start_time;
- ssize_t ret = 0;
- int writing = 0;
- int at_head = 0;
- struct request *rq;
- struct scsi_request *req;
- struct bio *bio;
-
- if (hdr->interface_id != 'S')
- return -EINVAL;
-
- if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
- return -EIO;
-
- if (hdr->dxfer_len)
- switch (hdr->dxfer_direction) {
- default:
- return -EINVAL;
- case SG_DXFER_TO_DEV:
- writing = 1;
- break;
- case SG_DXFER_TO_FROM_DEV:
- case SG_DXFER_FROM_DEV:
- break;
- }
- if (hdr->flags & SG_FLAG_Q_AT_HEAD)
- at_head = 1;
-
- ret = -ENOMEM;
- rq = blk_get_request(q, writing ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- req = scsi_req(rq);
-
- if (hdr->cmd_len > BLK_MAX_CDB) {
- req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
- if (!req->cmd)
- goto out_put_request;
- }
-
- ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
- if (ret < 0)
- goto out_free_cdb;
-
- ret = 0;
- if (hdr->iovec_count) {
- struct iov_iter i;
- struct iovec *iov = NULL;
-
- ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
- hdr->iovec_count, 0, &iov, &i);
- if (ret < 0)
- goto out_free_cdb;
-
- /* SG_IO howto says that the shorter of the two wins */
- iov_iter_truncate(&i, hdr->dxfer_len);
-
- ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
- kfree(iov);
- } else if (hdr->dxfer_len)
- ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
- GFP_KERNEL);
-
- if (ret)
- goto out_free_cdb;
-
- bio = rq->bio;
- req->retries = 0;
-
- start_time = jiffies;
-
- blk_execute_rq(bd_disk, rq, at_head);
-
- hdr->duration = jiffies_to_msecs(jiffies - start_time);
-
- ret = blk_complete_sghdr_rq(rq, hdr, bio);
-
-out_free_cdb:
- scsi_req_free_cmd(req);
-out_put_request:
- blk_put_request(rq);
- return ret;
-}
-
-/**
- * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
- * @q: request queue to send scsi commands down
- * @disk: gendisk to operate on (optional)
- * @mode: mode used to open the file through which the ioctl has been
- * submitted
- * @sic: userspace structure describing the command to perform
- *
- * Send down the scsi command described by @sic to the device below
- * the request queue @q. If @file is non-NULL it's used to perform
- * fine-grained permission checks that allow users to send down
- * non-destructive SCSI commands. If the caller has a struct gendisk
- * available it should be passed in as @disk to allow the low level
- * driver to use the information contained in it. A non-NULL @disk
- * is only allowed if the caller knows that the low level driver doesn't
- * need it (e.g. in the scsi subsystem).
- *
- * Notes:
- * - This interface is deprecated - users should use the SG_IO
- * interface instead, as this is a more flexible approach to
- * performing SCSI commands on a device.
- * - The SCSI command length is determined by examining the 1st byte
- * of the given command. There is no way to override this.
- * - Data transfers are limited to PAGE_SIZE
- * - The length (x + y) must be at least OMAX_SB_LEN bytes long to
- * accommodate the sense buffer when an error occurs.
- * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
- * old code will not be surprised.
- * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
- * a negative return and the Unix error code in 'errno'.
- * If the SCSI command succeeds then 0 is returned.
- * Positive numbers returned are the compacted SCSI error codes (4
- * bytes in one int) where the lowest byte is the SCSI status.
- */
-int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
- struct scsi_ioctl_command __user *sic)
-{
- enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
- struct request *rq;
- struct scsi_request *req;
- int err;
- unsigned int in_len, out_len, bytes, opcode, cmdlen;
- char *buffer = NULL;
-
- if (!sic)
- return -EINVAL;
-
- /*
- * get in and out lengths, verify they don't exceed a page worth of data
- */
- if (get_user(in_len, &sic->inlen))
- return -EFAULT;
- if (get_user(out_len, &sic->outlen))
- return -EFAULT;
- if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
- return -EINVAL;
- if (get_user(opcode, sic->data))
- return -EFAULT;
-
- bytes = max(in_len, out_len);
- if (bytes) {
- buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
- if (!buffer)
- return -ENOMEM;
-
- }
-
- rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto error_free_buffer;
- }
- req = scsi_req(rq);
-
- cmdlen = COMMAND_SIZE(opcode);
-
- /*
- * get command and data to send to device, if any
- */
- err = -EFAULT;
- req->cmd_len = cmdlen;
- if (copy_from_user(req->cmd, sic->data, cmdlen))
- goto error;
-
- if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
- goto error;
-
- err = blk_verify_command(req->cmd, mode);
- if (err)
- goto error;
-
- /* default. possibly overridden later */
- req->retries = 5;
-
- switch (opcode) {
- case SEND_DIAGNOSTIC:
- case FORMAT_UNIT:
- rq->timeout = FORMAT_UNIT_TIMEOUT;
- req->retries = 1;
- break;
- case START_STOP:
- rq->timeout = START_STOP_TIMEOUT;
- break;
- case MOVE_MEDIUM:
- rq->timeout = MOVE_MEDIUM_TIMEOUT;
- break;
- case READ_ELEMENT_STATUS:
- rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
- break;
- case READ_DEFECT_DATA:
- rq->timeout = READ_DEFECT_DATA_TIMEOUT;
- req->retries = 1;
- break;
- default:
- rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
- break;
- }
-
- if (bytes) {
- err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
- if (err)
- goto error;
- }
-
- blk_execute_rq(disk, rq, 0);
-
- err = req->result & 0xff; /* only 8 bit SCSI status */
- if (err) {
- if (req->sense_len && req->sense) {
- bytes = (OMAX_SB_LEN > req->sense_len) ?
- req->sense_len : OMAX_SB_LEN;
- if (copy_to_user(sic->data, req->sense, bytes))
- err = -EFAULT;
- }
- } else {
- if (copy_to_user(sic->data, buffer, out_len))
- err = -EFAULT;
- }
-
-error:
- blk_put_request(rq);
-
-error_free_buffer:
- kfree(buffer);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
-
-/* Send basic block requests */
-static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
- int cmd, int data)
-{
- struct request *rq;
- int err;
-
- rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
- scsi_req(rq)->cmd[0] = cmd;
- scsi_req(rq)->cmd[4] = data;
- scsi_req(rq)->cmd_len = 6;
- blk_execute_rq(bd_disk, rq, 0);
- err = scsi_req(rq)->result ? -EIO : 0;
- blk_put_request(rq);
-
- return err;
-}
-
-static inline int blk_send_start_stop(struct request_queue *q,
- struct gendisk *bd_disk, int data)
-{
- return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
-}
-
-int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp)
-{
-#ifdef CONFIG_COMPAT
- if (in_compat_syscall()) {
- struct compat_sg_io_hdr hdr32 = {
- .interface_id = hdr->interface_id,
- .dxfer_direction = hdr->dxfer_direction,
- .cmd_len = hdr->cmd_len,
- .mx_sb_len = hdr->mx_sb_len,
- .iovec_count = hdr->iovec_count,
- .dxfer_len = hdr->dxfer_len,
- .dxferp = (uintptr_t)hdr->dxferp,
- .cmdp = (uintptr_t)hdr->cmdp,
- .sbp = (uintptr_t)hdr->sbp,
- .timeout = hdr->timeout,
- .flags = hdr->flags,
- .pack_id = hdr->pack_id,
- .usr_ptr = (uintptr_t)hdr->usr_ptr,
- .status = hdr->status,
- .masked_status = hdr->masked_status,
- .msg_status = hdr->msg_status,
- .sb_len_wr = hdr->sb_len_wr,
- .host_status = hdr->host_status,
- .driver_status = hdr->driver_status,
- .resid = hdr->resid,
- .duration = hdr->duration,
- .info = hdr->info,
- };
-
- if (copy_to_user(argp, &hdr32, sizeof(hdr32)))
- return -EFAULT;
-
- return 0;
- }
-#endif
-
- if (copy_to_user(argp, hdr, sizeof(*hdr)))
- return -EFAULT;
-
- return 0;
-}
-EXPORT_SYMBOL(put_sg_io_hdr);
-
-int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp)
-{
-#ifdef CONFIG_COMPAT
- struct compat_sg_io_hdr hdr32;
-
- if (in_compat_syscall()) {
- if (copy_from_user(&hdr32, argp, sizeof(hdr32)))
- return -EFAULT;
-
- *hdr = (struct sg_io_hdr) {
- .interface_id = hdr32.interface_id,
- .dxfer_direction = hdr32.dxfer_direction,
- .cmd_len = hdr32.cmd_len,
- .mx_sb_len = hdr32.mx_sb_len,
- .iovec_count = hdr32.iovec_count,
- .dxfer_len = hdr32.dxfer_len,
- .dxferp = compat_ptr(hdr32.dxferp),
- .cmdp = compat_ptr(hdr32.cmdp),
- .sbp = compat_ptr(hdr32.sbp),
- .timeout = hdr32.timeout,
- .flags = hdr32.flags,
- .pack_id = hdr32.pack_id,
- .usr_ptr = compat_ptr(hdr32.usr_ptr),
- .status = hdr32.status,
- .masked_status = hdr32.masked_status,
- .msg_status = hdr32.msg_status,
- .sb_len_wr = hdr32.sb_len_wr,
- .host_status = hdr32.host_status,
- .driver_status = hdr32.driver_status,
- .resid = hdr32.resid,
- .duration = hdr32.duration,
- .info = hdr32.info,
- };
-
- return 0;
- }
-#endif
-
- if (copy_from_user(hdr, argp, sizeof(*hdr)))
- return -EFAULT;
-
- return 0;
-}
-EXPORT_SYMBOL(get_sg_io_hdr);
-
-#ifdef CONFIG_COMPAT
-struct compat_cdrom_generic_command {
- unsigned char cmd[CDROM_PACKET_SIZE];
- compat_caddr_t buffer;
- compat_uint_t buflen;
- compat_int_t stat;
- compat_caddr_t sense;
- unsigned char data_direction;
- unsigned char pad[3];
- compat_int_t quiet;
- compat_int_t timeout;
- compat_caddr_t unused;
-};
-#endif
-
-static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc,
- const void __user *arg)
-{
-#ifdef CONFIG_COMPAT
- if (in_compat_syscall()) {
- struct compat_cdrom_generic_command cgc32;
-
- if (copy_from_user(&cgc32, arg, sizeof(cgc32)))
- return -EFAULT;
-
- *cgc = (struct cdrom_generic_command) {
- .buffer = compat_ptr(cgc32.buffer),
- .buflen = cgc32.buflen,
- .stat = cgc32.stat,
- .sense = compat_ptr(cgc32.sense),
- .data_direction = cgc32.data_direction,
- .quiet = cgc32.quiet,
- .timeout = cgc32.timeout,
- .unused = compat_ptr(cgc32.unused),
- };
- memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE);
- return 0;
- }
-#endif
- if (copy_from_user(cgc, arg, sizeof(*cgc)))
- return -EFAULT;
-
- return 0;
-}
-
-static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
- void __user *arg)
-{
-#ifdef CONFIG_COMPAT
- if (in_compat_syscall()) {
- struct compat_cdrom_generic_command cgc32 = {
- .buffer = (uintptr_t)(cgc->buffer),
- .buflen = cgc->buflen,
- .stat = cgc->stat,
- .sense = (uintptr_t)(cgc->sense),
- .data_direction = cgc->data_direction,
- .quiet = cgc->quiet,
- .timeout = cgc->timeout,
- .unused = (uintptr_t)(cgc->unused),
- };
- memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE);
-
- if (copy_to_user(arg, &cgc32, sizeof(cgc32)))
- return -EFAULT;
-
- return 0;
- }
-#endif
- if (copy_to_user(arg, cgc, sizeof(*cgc)))
- return -EFAULT;
-
- return 0;
-}
-
-static int scsi_cdrom_send_packet(struct request_queue *q,
- struct gendisk *bd_disk,
- fmode_t mode, void __user *arg)
-{
- struct cdrom_generic_command cgc;
- struct sg_io_hdr hdr;
- int err;
-
- err = scsi_get_cdrom_generic_arg(&cgc, arg);
- if (err)
- return err;
-
- cgc.timeout = clock_t_to_jiffies(cgc.timeout);
- memset(&hdr, 0, sizeof(hdr));
- hdr.interface_id = 'S';
- hdr.cmd_len = sizeof(cgc.cmd);
- hdr.dxfer_len = cgc.buflen;
- switch (cgc.data_direction) {
- case CGC_DATA_UNKNOWN:
- hdr.dxfer_direction = SG_DXFER_UNKNOWN;
- break;
- case CGC_DATA_WRITE:
- hdr.dxfer_direction = SG_DXFER_TO_DEV;
- break;
- case CGC_DATA_READ:
- hdr.dxfer_direction = SG_DXFER_FROM_DEV;
- break;
- case CGC_DATA_NONE:
- hdr.dxfer_direction = SG_DXFER_NONE;
- break;
- default:
- return -EINVAL;
- }
-
- hdr.dxferp = cgc.buffer;
- hdr.sbp = cgc.sense;
- if (hdr.sbp)
- hdr.mx_sb_len = sizeof(struct request_sense);
- hdr.timeout = jiffies_to_msecs(cgc.timeout);
- hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
- hdr.cmd_len = sizeof(cgc.cmd);
-
- err = sg_io(q, bd_disk, &hdr, mode);
- if (err == -EFAULT)
- return -EFAULT;
-
- if (hdr.status)
- return -EIO;
-
- cgc.stat = err;
- cgc.buflen = hdr.resid;
- if (scsi_put_cdrom_generic_arg(&cgc, arg))
- return -EFAULT;
-
- return err;
-}
-
-int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
- unsigned int cmd, void __user *arg)
-{
- int err;
-
- if (!q)
- return -ENXIO;
-
- switch (cmd) {
- /*
- * new sgv3 interface
- */
- case SG_GET_VERSION_NUM:
- err = sg_get_version(arg);
- break;
- case SCSI_IOCTL_GET_IDLUN:
- err = scsi_get_idlun(q, arg);
- break;
- case SCSI_IOCTL_GET_BUS_NUMBER:
- err = scsi_get_bus(q, arg);
- break;
- case SG_SET_TIMEOUT:
- err = sg_set_timeout(q, arg);
- break;
- case SG_GET_TIMEOUT:
- err = sg_get_timeout(q);
- break;
- case SG_GET_RESERVED_SIZE:
- err = sg_get_reserved_size(q, arg);
- break;
- case SG_SET_RESERVED_SIZE:
- err = sg_set_reserved_size(q, arg);
- break;
- case SG_EMULATED_HOST:
- err = sg_emulated_host(q, arg);
- break;
- case SG_IO: {
- struct sg_io_hdr hdr;
-
- err = get_sg_io_hdr(&hdr, arg);
- if (err)
- break;
- err = sg_io(q, bd_disk, &hdr, mode);
- if (err == -EFAULT)
- break;
-
- if (put_sg_io_hdr(&hdr, arg))
- err = -EFAULT;
- break;
- }
- case CDROM_SEND_PACKET:
- err = scsi_cdrom_send_packet(q, bd_disk, mode, arg);
- break;
-
- /*
- * old junk scsi send command ioctl
- */
- case SCSI_IOCTL_SEND_COMMAND:
- printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
- err = -EINVAL;
- if (!arg)
- break;
-
- err = sg_scsi_ioctl(q, bd_disk, mode, arg);
- break;
- case CDROMCLOSETRAY:
- err = blk_send_start_stop(q, bd_disk, 0x03);
- break;
- case CDROMEJECT:
- err = blk_send_start_stop(q, bd_disk, 0x02);
- break;
- default:
- err = -ENOTTY;
- }
-
- return err;
-}
-EXPORT_SYMBOL(scsi_cmd_ioctl);
-
-int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
-{
- if (bd && !bdev_is_partition(bd))
- return 0;
-
- if (capable(CAP_SYS_RAWIO))
- return 0;
-
- return -ENOIOCTLCMD;
-}
-EXPORT_SYMBOL(scsi_verify_blk_ioctl);
-
-int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
- unsigned int cmd, void __user *arg)
-{
- int ret;
-
- ret = scsi_verify_blk_ioctl(bd, cmd);
- if (ret < 0)
- return ret;
-
- return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
-}
-EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
-
-/**
- * scsi_req_init - initialize certain fields of a scsi_request structure
- * @req: Pointer to a scsi_request structure.
- * Initializes .__cmd[], .cmd, .cmd_len and .sense_len but no other members
- * of struct scsi_request.
- */
-void scsi_req_init(struct scsi_request *req)
-{
- memset(req->__cmd, 0, sizeof(req->__cmd));
- req->cmd = req->__cmd;
- req->cmd_len = BLK_MAX_CDB;
- req->sense_len = 0;
-}
-EXPORT_SYMBOL(scsi_req_init);
-
-static int __init blk_scsi_ioctl_init(void)
-{
- blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
- return 0;
-}
-fs_initcall(blk_scsi_ioctl_init);
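Although block/scsi_ioctl.c is removed here (its surviving passthrough pieces move toward the SCSI midlayer in this series), the user-visible SG_IO v4 interface on bsg nodes is unchanged. A minimal userspace sketch issuing a TEST UNIT READY through struct sg_io_v4 — the device path is only an example:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY */
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);	/* example node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';			/* checked in bsg_sg_io() */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);
	hdr.request = (uintptr_t)cdb;
	hdr.timeout = 10000;			/* ms; 0 falls back to defaults */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	return 0;
}
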
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index bb3637762985..bf9c4b6c5c3d 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -912,7 +912,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
* Note that ATA_QCFLAG_FAILED is unconditionally set after
* this function completes.
*/
- blk_abort_request(qc->scsicmd->request);
+ blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
}
/**
@@ -1893,8 +1893,7 @@ static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
*/
static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
{
- if (qc->scsicmd &&
- qc->scsicmd->request->rq_flags & RQF_QUIET)
+ if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
qc->flags |= ATA_QCFLAG_QUIET;
return qc->flags & ATA_QCFLAG_QUIET;
}
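These hunks, and the libata-scsi.c ones that follow, switch from the soon-to-be-removed scsi_cmnd->request pointer to the scsi_cmd_to_rq() accessor. The helper, introduced elsewhere in this series, is essentially a thin wrapper over the blk-mq PDU layout; roughly (see include/scsi/scsi_cmnd.h for the real definition):

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

/* The scsi_cmnd is the driver-private data (PDU) of its request, so the
 * request can be recovered positionally instead of via a stored pointer. */
static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
	return blk_mq_rq_from_pdu(scmd);
}
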
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b9588c52815d..f7f630485465 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -631,7 +631,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
{
struct ata_queued_cmd *qc;
- qc = ata_qc_new_init(dev, cmd->request->tag);
+ qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag);
if (qc) {
qc->scsicmd = cmd;
qc->scsidone = cmd->scsi_done;
@@ -639,7 +639,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
qc->sg = scsi_sglist(cmd);
qc->n_elem = scsi_sg_count(cmd);
- if (cmd->request->rq_flags & RQF_QUIET)
+ if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
qc->flags |= ATA_QCFLAG_QUIET;
} else {
cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
@@ -1496,7 +1496,7 @@ nothing_to_do:
static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
{
- struct request *rq = scmd->request;
+ struct request *rq = scsi_cmd_to_rq(scmd);
u32 req_blocks;
if (!blk_rq_is_passthrough(rq))
@@ -1531,7 +1531,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
- struct request *rq = scmd->request;
+ struct request *rq = scsi_cmd_to_rq(scmd);
int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
unsigned int tf_flags = 0;
u64 block;
@@ -3181,7 +3181,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
* as it modifies the DATA OUT buffer, which would corrupt user
* memory for SG_IO commands.
*/
- if (unlikely(blk_rq_is_passthrough(scmd->request)))
+ if (unlikely(blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))))
goto invalid_opcode;
if (unlikely(scmd->cmd_len < 16)) {
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 9d0dd8f4c21c..121635aa8c00 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -48,8 +48,8 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
struct scsi_cmnd *cmd = qc->scsicmd;
bool swap = 1;
- if (dev->class == ATA_DEV_ATA && cmd && cmd->request &&
- !blk_rq_is_passthrough(cmd->request))
+ if (dev->class == ATA_DEV_ATA && cmd &&
+ !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
swap = 0;
/* Transfer multiple of 2 bytes */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index cadcade65825..9badd7f7fe62 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -884,6 +884,8 @@ static void device_link_put_kref(struct device_link *link)
{
if (link->flags & DL_FLAG_STATELESS)
kref_put(&link->kref, __device_link_del);
+ else if (!device_is_registered(link->consumer))
+ __device_link_del(&link->kref);
else
WARN(1, "Unable to drop a managed device link reference\n");
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 63056cfd4b62..90ed1642304a 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -74,7 +74,6 @@ config N64CART
config CDROM
tristate
- select BLK_SCSI_REQUEST
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
@@ -306,7 +305,7 @@ config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
select CDROM
- select BLK_SCSI_REQUEST
+ select SCSI_COMMON
help
Note: This driver is deprecated and will be removed from the
kernel in the near future!
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b7d663736d35..c38317979f74 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -239,8 +239,8 @@ static void nbd_dev_remove(struct nbd_device *nbd)
if (disk) {
del_gendisk(disk);
- blk_mq_free_tag_set(&nbd->tag_set);
blk_cleanup_disk(disk);
+ blk_mq_free_tag_set(&nbd->tag_set);
}
/*
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 7c6ae1036927..a295634597ba 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -27,7 +27,6 @@ config PARIDE_PCD
tristate "Parallel port ATAPI CD-ROMs"
depends on PARIDE
select CDROM
- select BLK_SCSI_REQUEST # only for the generic cdrom code
help
This option enables the high-level driver for ATAPI CD-ROM devices
connected through a parallel port. If you chose to build PARIDE
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 3b2b8e872beb..9b3298926356 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -1014,8 +1014,8 @@ static void __exit pd_exit(void)
if (p) {
disk->gd = NULL;
del_gendisk(p);
- blk_mq_free_tag_set(&disk->tag_set);
blk_cleanup_disk(p);
+ blk_mq_free_tag_set(&disk->tag_set);
pi_release(disk->pi);
}
}
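Both this hunk and the nbd one above make the same ordering fix: the gendisk, and with it the request_queue, must be cleaned up before the tag set it was allocated from, since queue teardown can still reach into the tag set. The resulting pattern, with a hypothetical my_dev:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct my_dev {				/* hypothetical driver state */
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
};

static void my_dev_remove(struct my_dev *md)
{
	del_gendisk(md->disk);
	blk_cleanup_disk(md->disk);		/* queue torn down first */
	blk_mq_free_tag_set(&md->tag_set);	/* tag set freed last */
}
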
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8d49f8fa98bb..d83fee21f6c5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -502,34 +502,21 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
unsigned command, unsigned long argument)
{
- struct blkfront_info *info = bdev->bd_disk->private_data;
int i;
- dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
- command, (long)argument);
-
switch (command) {
case CDROMMULTISESSION:
- dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
for (i = 0; i < sizeof(struct cdrom_multisession); i++)
if (put_user(0, (char __user *)(argument + i)))
return -EFAULT;
return 0;
-
- case CDROM_GET_CAPABILITY: {
- struct gendisk *gd = info->gd;
- if (gd->flags & GENHD_FL_CD)
+ case CDROM_GET_CAPABILITY:
+ if (bdev->bd_disk->flags & GENHD_FL_CD)
return 0;
return -EINVAL;
- }
-
default:
- /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
- command);*/
- return -EINVAL; /* same return as native Linux */
+ return -EINVAL;
}
-
- return 0;
}
static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
@@ -1177,36 +1164,6 @@ out_release_minors:
return err;
}
-static void xlvbd_release_gendisk(struct blkfront_info *info)
-{
- unsigned int minor, nr_minors, i;
- struct blkfront_ring_info *rinfo;
-
- if (info->rq == NULL)
- return;
-
- /* No more blkif_request(). */
- blk_mq_stop_hw_queues(info->rq);
-
- for_each_rinfo(info, rinfo, i) {
- /* No more gnttab callback work. */
- gnttab_cancel_free_callback(&rinfo->callback);
-
- /* Flush gnttab callback work. Must be done with no locks held. */
- flush_work(&rinfo->work);
- }
-
- del_gendisk(info->gd);
-
- minor = info->gd->first_minor;
- nr_minors = info->gd->minors;
- xlbd_release_minors(minor, nr_minors);
-
- blk_cleanup_disk(info->gd);
- info->gd = NULL;
- blk_mq_free_tag_set(&info->tag_set);
-}
-
/* Already hold rinfo->ring_lock. */
static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
{
@@ -1756,12 +1713,6 @@ abort_transaction:
return err;
}
-static void free_info(struct blkfront_info *info)
-{
- list_del(&info->info_list);
- kfree(info);
-}
-
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@@ -1880,13 +1831,6 @@ again:
xenbus_dev_fatal(dev, err, "%s", message);
destroy_blkring:
blkif_free(info, 0);
-
- mutex_lock(&blkfront_mutex);
- free_info(info);
- mutex_unlock(&blkfront_mutex);
-
- dev_set_drvdata(&dev->dev, NULL);
-
return err;
}
@@ -2126,38 +2070,26 @@ static int blkfront_resume(struct xenbus_device *dev)
static void blkfront_closing(struct blkfront_info *info)
{
struct xenbus_device *xbdev = info->xbdev;
- struct block_device *bdev = NULL;
-
- mutex_lock(&info->mutex);
+ struct blkfront_ring_info *rinfo;
+ unsigned int i;
- if (xbdev->state == XenbusStateClosing) {
- mutex_unlock(&info->mutex);
+ if (xbdev->state == XenbusStateClosing)
return;
- }
- if (info->gd)
- bdev = bdgrab(info->gd->part0);
-
- mutex_unlock(&info->mutex);
-
- if (!bdev) {
- xenbus_frontend_closed(xbdev);
- return;
- }
+ /* No more blkif_request(). */
+ blk_mq_stop_hw_queues(info->rq);
+ blk_set_queue_dying(info->rq);
+ set_capacity(info->gd, 0);
- mutex_lock(&bdev->bd_disk->open_mutex);
+ for_each_rinfo(info, rinfo, i) {
+ /* No more gnttab callback work. */
+ gnttab_cancel_free_callback(&rinfo->callback);
- if (bdev->bd_openers) {
- xenbus_dev_error(xbdev, -EBUSY,
- "Device in use; refusing to close");
- xenbus_switch_state(xbdev, XenbusStateClosing);
- } else {
- xlvbd_release_gendisk(info);
- xenbus_frontend_closed(xbdev);
+ /* Flush gnttab callback work. Must be done with no locks held. */
+ flush_work(&rinfo->work);
}
- mutex_unlock(&bdev->bd_disk->open_mutex);
- bdput(bdev);
+ xenbus_frontend_closed(xbdev);
}
static void blkfront_setup_discard(struct blkfront_info *info)
@@ -2472,8 +2404,7 @@ static void blkback_changed(struct xenbus_device *dev,
break;
fallthrough;
case XenbusStateClosing:
- if (info)
- blkfront_closing(info);
+ blkfront_closing(info);
break;
}
}
@@ -2481,56 +2412,21 @@ static void blkback_changed(struct xenbus_device *dev,
static int blkfront_remove(struct xenbus_device *xbdev)
{
struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
- struct block_device *bdev = NULL;
- struct gendisk *disk;
dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
- if (!info)
- return 0;
-
- blkif_free(info, 0);
-
- mutex_lock(&info->mutex);
-
- disk = info->gd;
- if (disk)
- bdev = bdgrab(disk->part0);
-
- info->xbdev = NULL;
- mutex_unlock(&info->mutex);
-
- if (!bdev) {
- mutex_lock(&blkfront_mutex);
- free_info(info);
- mutex_unlock(&blkfront_mutex);
- return 0;
- }
-
- /*
- * The xbdev was removed before we reached the Closed
- * state. See if it's safe to remove the disk. If the bdev
- * isn't closed yet, we let release take care of it.
- */
-
- mutex_lock(&disk->open_mutex);
- info = disk->private_data;
-
- dev_warn(disk_to_dev(disk),
- "%s was hot-unplugged, %d stale handles\n",
- xbdev->nodename, bdev->bd_openers);
+ del_gendisk(info->gd);
- if (info && !bdev->bd_openers) {
- xlvbd_release_gendisk(info);
- disk->private_data = NULL;
- mutex_lock(&blkfront_mutex);
- free_info(info);
- mutex_unlock(&blkfront_mutex);
- }
+ mutex_lock(&blkfront_mutex);
+ list_del(&info->info_list);
+ mutex_unlock(&blkfront_mutex);
- mutex_unlock(&disk->open_mutex);
- bdput(bdev);
+ blkif_free(info, 0);
+ xlbd_release_minors(info->gd->first_minor, info->gd->minors);
+ blk_cleanup_disk(info->gd);
+ blk_mq_free_tag_set(&info->tag_set);
+ kfree(info);
return 0;
}
@@ -2541,77 +2437,9 @@ static int blkfront_is_ready(struct xenbus_device *dev)
return info->is_ready && info->xbdev;
}
-static int blkif_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct blkfront_info *info;
- int err = 0;
-
- mutex_lock(&blkfront_mutex);
-
- info = disk->private_data;
- if (!info) {
- /* xbdev gone */
- err = -ERESTARTSYS;
- goto out;
- }
-
- mutex_lock(&info->mutex);
-
- if (!info->gd)
- /* xbdev is closed */
- err = -ERESTARTSYS;
-
- mutex_unlock(&info->mutex);
-
-out:
- mutex_unlock(&blkfront_mutex);
- return err;
-}
-
-static void blkif_release(struct gendisk *disk, fmode_t mode)
-{
- struct blkfront_info *info = disk->private_data;
- struct xenbus_device *xbdev;
-
- mutex_lock(&blkfront_mutex);
- if (disk->part0->bd_openers)
- goto out_mutex;
-
- /*
- * Check if we have been instructed to close. We will have
- * deferred this request, because the bdev was still open.
- */
-
- mutex_lock(&info->mutex);
- xbdev = info->xbdev;
-
- if (xbdev && xbdev->state == XenbusStateClosing) {
- /* pending switch to state closed */
- dev_info(disk_to_dev(disk), "releasing disk\n");
- xlvbd_release_gendisk(info);
- xenbus_frontend_closed(info->xbdev);
- }
-
- mutex_unlock(&info->mutex);
-
- if (!xbdev) {
- /* sudden device removal */
- dev_info(disk_to_dev(disk), "releasing disk\n");
- xlvbd_release_gendisk(info);
- disk->private_data = NULL;
- free_info(info);
- }
-
-out_mutex:
- mutex_unlock(&blkfront_mutex);
-}
-
static const struct block_device_operations xlvbd_block_fops =
{
.owner = THIS_MODULE,
- .open = blkif_open,
- .release = blkif_release,
.getgeo = blkif_getgeo,
.ioctl = blkif_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index feb827eefd1a..bd2e5b1560f5 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -629,7 +629,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
if (CDROM_CAN(CDC_MRW_W))
cdi->exit = cdrom_mrw_exit;
- if (cdi->disk)
+ if (cdi->ops->read_cdda_bpc)
cdi->cdda_method = CDDA_BPC_FULL;
else
cdi->cdda_method = CDDA_OLD;
@@ -2159,81 +2159,26 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
- struct request_queue *q = cdi->disk->queue;
- struct request *rq;
- struct scsi_request *req;
- struct bio *bio;
- unsigned int len;
+ int max_frames = (queue_max_sectors(cdi->disk->queue) << 9) /
+ CD_FRAMESIZE_RAW;
int nr, ret = 0;
- if (!q)
- return -ENXIO;
-
- if (!blk_queue_scsi_passthrough(q)) {
- WARN_ONCE(true,
- "Attempt read CDDA info through a non-SCSI queue\n");
- return -EINVAL;
- }
-
cdi->last_sense = 0;
while (nframes) {
- nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
- if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
- nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
-
- len = nr * CD_FRAMESIZE_RAW;
-
- rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- break;
- }
- req = scsi_req(rq);
-
- ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
- if (ret) {
- blk_put_request(rq);
- break;
- }
-
- req->cmd[0] = GPCMD_READ_CD;
- req->cmd[1] = 1 << 2;
- req->cmd[2] = (lba >> 24) & 0xff;
- req->cmd[3] = (lba >> 16) & 0xff;
- req->cmd[4] = (lba >> 8) & 0xff;
- req->cmd[5] = lba & 0xff;
- req->cmd[6] = (nr >> 16) & 0xff;
- req->cmd[7] = (nr >> 8) & 0xff;
- req->cmd[8] = nr & 0xff;
- req->cmd[9] = 0xf8;
-
- req->cmd_len = 12;
- rq->timeout = 60 * HZ;
- bio = rq->bio;
-
- blk_execute_rq(cdi->disk, rq, 0);
- if (scsi_req(rq)->result) {
- struct scsi_sense_hdr sshdr;
-
- ret = -EIO;
- scsi_normalize_sense(req->sense, req->sense_len,
- &sshdr);
- cdi->last_sense = sshdr.sense_key;
- }
-
- if (blk_rq_unmap_user(bio))
- ret = -EFAULT;
- blk_put_request(rq);
+ else
+ nr = min(nframes, max_frames);
+ ret = cdi->ops->read_cdda_bpc(cdi, ubuf, lba, nr,
+ &cdi->last_sense);
if (ret)
break;
nframes -= nr;
lba += nr;
- ubuf += len;
+ ubuf += (nr * CD_FRAMESIZE_RAW);
}
return ret;
@@ -3357,13 +3302,6 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
void __user *argp = (void __user *)arg;
int ret;
- /*
- * Try the generic SCSI command ioctl's first.
- */
- ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
- if (ret != -ENOTTY)
- return ret;
-
switch (cmd) {
case CDROMMULTISESSION:
return cdrom_ioctl_multisession(cdi, argp);
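cdrom_read_cdda_bpc() no longer builds SCSI passthrough requests itself; a driver that can do burst reads supplies a read_cdda_bpc method in its cdrom_device_ops, and the generic loop above just calls it per chunk. From that call site, the callback looks roughly like this — the prototype is inferred from this patch and my_read_cdda_bpc stands in for a real implementation such as the sr driver's:

#include <linux/cdrom.h>
#include <linux/types.h>

/* Called with at most max_frames frames per iteration; on failure the
 * driver reports the SCSI sense key back through *last_sense. */
static int my_read_cdda_bpc(struct cdrom_device_info *cdi,
			    void __user *ubuf, u32 lba, u32 nframes,
			    u8 *last_sense)
{
	/* issue a READ CD for nframes frames at lba into ubuf */
	return 0;
}
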
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
index 027484ecfb0d..3c99696b145e 100644
--- a/drivers/char/powernv-op-panel.c
+++ b/drivers/char/powernv-op-panel.c
@@ -75,6 +75,7 @@ static int __op_panel_update_display(void)
rc);
break;
}
+ break;
case OPAL_SUCCESS:
break;
default:
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 50b5269586a4..ae24e0397d3c 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -30,8 +30,9 @@ enum clk_ids {
CLK_PLL2_DIV20,
CLK_PLL3,
CLK_PLL3_DIV2,
+ CLK_PLL3_DIV2_4,
+ CLK_PLL3_DIV2_4_2,
CLK_PLL3_DIV4,
- CLK_PLL3_DIV8,
CLK_PLL4,
CLK_PLL5,
CLK_PLL5_DIV2,
@@ -42,12 +43,13 @@ enum clk_ids {
};
/* Divider tables */
-static const struct clk_div_table dtable_3b[] = {
+static const struct clk_div_table dtable_1_32[] = {
{0, 1},
{1, 2},
{2, 4},
{3, 8},
{4, 32},
+ {0, 0},
};
static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
@@ -66,47 +68,56 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20),
DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2),
+ DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
+ DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4),
- DEF_FIXED(".pll3_div8", CLK_PLL3_DIV8, CLK_PLL3, 1, 8),
/* Core output clk */
DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1),
DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV16, DIVPL2A,
- dtable_3b, CLK_DIVIDER_HIWORD_MASK),
+ dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV20, 1, 1),
- DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV8,
- DIVPL3B, dtable_3b, CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4,
+ DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2,
+ DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
};
static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
- DEF_MOD("gic", R9A07G044_CLK_GIC600,
- R9A07G044_CLK_P1,
- 0x514, BIT(0), (BIT(0) | BIT(1))),
- DEF_MOD("ia55", R9A07G044_CLK_IA55,
- R9A07G044_CLK_P1,
- 0x518, (BIT(0) | BIT(1)), BIT(0)),
- DEF_MOD("scif0", R9A07G044_CLK_SCIF0,
- R9A07G044_CLK_P0,
- 0x584, BIT(0), BIT(0)),
- DEF_MOD("scif1", R9A07G044_CLK_SCIF1,
- R9A07G044_CLK_P0,
- 0x584, BIT(1), BIT(1)),
- DEF_MOD("scif2", R9A07G044_CLK_SCIF2,
- R9A07G044_CLK_P0,
- 0x584, BIT(2), BIT(2)),
- DEF_MOD("scif3", R9A07G044_CLK_SCIF3,
- R9A07G044_CLK_P0,
- 0x584, BIT(3), BIT(3)),
- DEF_MOD("scif4", R9A07G044_CLK_SCIF4,
- R9A07G044_CLK_P0,
- 0x584, BIT(4), BIT(4)),
- DEF_MOD("sci0", R9A07G044_CLK_SCI0,
- R9A07G044_CLK_P0,
- 0x588, BIT(0), BIT(0)),
+ DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
+ 0x514, 0),
+ DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
+ 0x518, 0),
+ DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
+ 0x518, 1),
+ DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 0),
+ DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 1),
+ DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 2),
+ DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 3),
+ DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 4),
+ DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
+ 0x588, 0),
+};
+
+static struct rzg2l_reset r9a07g044_resets[] = {
+ DEF_RST(R9A07G044_GIC600_GICRESET_N, 0x814, 0),
+ DEF_RST(R9A07G044_GIC600_DBG_GICRESET_N, 0x814, 1),
+ DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0),
+ DEF_RST(R9A07G044_SCIF0_RST_SYSTEM_N, 0x884, 0),
+ DEF_RST(R9A07G044_SCIF1_RST_SYSTEM_N, 0x884, 1),
+ DEF_RST(R9A07G044_SCIF2_RST_SYSTEM_N, 0x884, 2),
+ DEF_RST(R9A07G044_SCIF3_RST_SYSTEM_N, 0x884, 3),
+ DEF_RST(R9A07G044_SCIF4_RST_SYSTEM_N, 0x884, 4),
+ DEF_RST(R9A07G044_SCI0_RST, 0x888, 0),
};
static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
- MOD_CLK_BASE + R9A07G044_CLK_GIC600,
+ MOD_CLK_BASE + R9A07G044_GIC600_GICCLK,
};
const struct rzg2l_cpg_info r9a07g044_cpg_info = {
@@ -123,5 +134,9 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = {
/* Module Clocks */
.mod_clks = r9a07g044_mod_clks,
.num_mod_clks = ARRAY_SIZE(r9a07g044_mod_clks),
- .num_hw_mod_clks = R9A07G044_CLK_MIPI_DSI_PIN + 1,
+ .num_hw_mod_clks = R9A07G044_TSU_PCLK + 1,
+
+ /* Resets */
+ .resets = r9a07g044_resets,
+ .num_resets = ARRAY_SIZE(r9a07g044_resets),
};
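
The renamed dtable_1_32 makes the non-linear mapping of the 3-bit divider field explicit (1, 2, 4, 8, then a jump to 32), and the added {0, 0} entry is the sentinel the common clk divider code expects. A small user-space sketch of how such a table is consulted; the struct mirrors the kernel's clk_div_table for illustration only.

#include <stdio.h>

struct clk_div_table { unsigned int val; unsigned int div; };

/* same shape as dtable_1_32 above: register field value -> divisor */
static const struct clk_div_table dtable_1_32[] = {
	{0, 1}, {1, 2}, {2, 4}, {3, 8}, {4, 32},
	{0, 0},	/* sentinel: div == 0 terminates the table */
};

static unsigned int div_from_field(const struct clk_div_table *t,
				   unsigned int val)
{
	for (; t->div; t++)
		if (t->val == val)
			return t->div;
	return 0;	/* unknown field value */
}

int main(void)
{
	/* e.g. field value 4 selects the /32 post-divider */
	printf("/%u\n", div_from_field(dtable_1_32, 4));
	return 0;
}
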
diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.c b/drivers/clk/renesas/renesas-rzg2l-cpg.c
index 5009b9e48b13..e7c59af2a1d8 100644
--- a/drivers/clk/renesas/renesas-rzg2l-cpg.c
+++ b/drivers/clk/renesas/renesas-rzg2l-cpg.c
@@ -47,9 +47,9 @@
#define SDIV(val) DIV_RSMASK(val, 0, 0x7)
#define CLK_ON_R(reg) (reg)
-#define CLK_MON_R(reg) (0x680 - 0x500 + (reg))
-#define CLK_RST_R(reg) (0x800 - 0x500 + (reg))
-#define CLK_MRST_R(reg) (0x980 - 0x500 + (reg))
+#define CLK_MON_R(reg) (0x180 + (reg))
+#define CLK_RST_R(reg) (reg)
+#define CLK_MRST_R(reg) (0x180 + (reg))
#define GET_REG_OFFSET(val) ((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
@@ -78,6 +78,7 @@ struct rzg2l_cpg_priv {
struct clk **clks;
unsigned int num_core_clks;
unsigned int num_mod_clks;
+ unsigned int num_resets;
unsigned int last_dt_core_clk;
struct raw_notifier_head notifiers;
@@ -315,15 +316,13 @@ fail:
*
* @hw: handle between common and hardware-specific interfaces
* @off: register offset
- * @onoff: ON/MON bits
- * @reset: reset bits
+ * @bit: ON/MON bit
* @priv: CPG/MSTP private data
*/
struct mstp_clock {
struct clk_hw hw;
u16 off;
- u8 onoff;
- u8 reset;
+ u8 bit;
struct rzg2l_cpg_priv *priv;
};
@@ -337,6 +336,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
struct device *dev = priv->dev;
unsigned long flags;
unsigned int i;
+ u32 bitmask = BIT(clock->bit);
u32 value;
if (!clock->off) {
@@ -349,9 +349,9 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
spin_lock_irqsave(&priv->rmw_lock, flags);
if (enable)
- value = (clock->onoff << 16) | clock->onoff;
+ value = (bitmask << 16) | bitmask;
else
- value = clock->onoff << 16;
+ value = bitmask << 16;
writel(value, priv->base + CLK_ON_R(reg));
spin_unlock_irqrestore(&priv->rmw_lock, flags);
@@ -360,7 +360,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
return 0;
for (i = 1000; i > 0; --i) {
- if (((readl(priv->base + CLK_MON_R(reg))) & clock->onoff))
+ if (((readl(priv->base + CLK_MON_R(reg))) & bitmask))
break;
cpu_relax();
}
@@ -388,6 +388,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
struct mstp_clock *clock = to_mod_clock(hw);
struct rzg2l_cpg_priv *priv = clock->priv;
+ u32 bitmask = BIT(clock->bit);
u32 value;
if (!clock->off) {
@@ -397,7 +398,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
value = readl(priv->base + CLK_MON_R(clock->off));
- return !(value & clock->onoff);
+ return !(value & bitmask);
}
static const struct clk_ops rzg2l_mod_clock_ops = {
@@ -457,8 +458,7 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
init.num_parents = 1;
clock->off = mod->off;
- clock->onoff = mod->onoff;
- clock->reset = mod->reset;
+ clock->bit = mod->bit;
clock->priv = priv;
clock->hw.init = &init;
@@ -483,12 +483,11 @@ static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->mod_clks[id].off;
- u32 dis = info->mod_clks[id].reset;
+ unsigned int reg = info->resets[id].off;
+ u32 dis = BIT(info->resets[id].bit);
u32 we = dis << 16;
- dev_dbg(rcdev->dev, "reset name:%s id:%ld offset:0x%x\n",
- info->mod_clks[id].name, id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
/* Reset module */
writel(we, priv->base + CLK_RST_R(reg));
@@ -507,11 +506,10 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->mod_clks[id].off;
- u32 value = info->mod_clks[id].reset << 16;
+ unsigned int reg = info->resets[id].off;
+ u32 value = BIT(info->resets[id].bit) << 16;
- dev_dbg(rcdev->dev, "assert name:%s id:%ld offset:0x%x\n",
- info->mod_clks[id].name, id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
writel(value, priv->base + CLK_RST_R(reg));
return 0;
@@ -522,12 +520,12 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->mod_clks[id].off;
- u32 dis = info->mod_clks[id].reset;
+ unsigned int reg = info->resets[id].off;
+ u32 dis = BIT(info->resets[id].bit);
u32 value = (dis << 16) | dis;
- dev_dbg(rcdev->dev, "deassert name:%s id:%ld offset:0x%x\n",
- info->mod_clks[id].name, id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
+ CLK_RST_R(reg));
writel(value, priv->base + CLK_RST_R(reg));
return 0;
@@ -538,8 +536,8 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->mod_clks[id].off;
- u32 bitmask = info->mod_clks[id].reset;
+ unsigned int reg = info->resets[id].off;
+ u32 bitmask = BIT(info->resets[id].bit);
return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
}
@@ -554,9 +552,11 @@ static const struct reset_control_ops rzg2l_cpg_reset_ops = {
static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
+ struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+ const struct rzg2l_cpg_info *info = priv->info;
unsigned int id = reset_spec->args[0];
- if (id >= rcdev->nr_resets) {
+ if (id >= rcdev->nr_resets || !info->resets[id].off) {
dev_err(rcdev->dev, "Invalid reset index %u\n", id);
return -EINVAL;
}
@@ -571,7 +571,7 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
priv->rcdev.dev = priv->dev;
priv->rcdev.of_reset_n_cells = 1;
priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
- priv->rcdev.nr_resets = priv->num_mod_clks;
+ priv->rcdev.nr_resets = priv->num_resets;
return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
@@ -594,42 +594,49 @@ static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device
{
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
+ bool once = true;
struct clk *clk;
int error;
int i = 0;
while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
&clkspec)) {
- if (rzg2l_cpg_is_pm_clk(&clkspec))
- goto found;
-
- of_node_put(clkspec.np);
+ if (rzg2l_cpg_is_pm_clk(&clkspec)) {
+ if (once) {
+ once = false;
+ error = pm_clk_create(dev);
+ if (error) {
+ of_node_put(clkspec.np);
+ goto err;
+ }
+ }
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+ if (IS_ERR(clk)) {
+ error = PTR_ERR(clk);
+ goto fail_destroy;
+ }
+
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n",
+ error);
+ goto fail_put;
+ }
+ } else {
+ of_node_put(clkspec.np);
+ }
i++;
}
return 0;
-found:
- clk = of_clk_get_from_provider(&clkspec);
- of_node_put(clkspec.np);
-
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- error = pm_clk_create(dev);
- if (error)
- goto fail_put;
-
- error = pm_clk_add_clk(dev, clk);
- if (error)
- goto fail_destroy;
-
- return 0;
+fail_put:
+ clk_put(clk);
fail_destroy:
pm_clk_destroy(dev);
-fail_put:
- clk_put(clk);
+err:
return error;
}
@@ -692,6 +699,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
priv->clks = clks;
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
+ priv->num_resets = info->num_resets;
priv->last_dt_core_clk = info->last_dt_core_clk;
for (i = 0; i < nclks; i++)
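
The CLK_ON/CLK_RST writes above rely on the CPG's write-enable idiom: the upper 16 bits of each register are a per-bit write mask and the lower 16 bits the values, so a single 32-bit write can flip one line without a read-modify-write cycle. A stand-alone sketch of the idiom, assuming active-low reset lines as the *_RESETN/*_N names suggest; reg_write() only models what the hardware does with such a write.

#include <stdint.h>

static void reg_write(volatile uint32_t *reg, uint32_t value)
{
	uint32_t wen = value >> 16;	/* per-bit write-enable mask */

	/* only bits with their mask bit set are updated */
	*reg = (*reg & ~wen) | (value & wen);
}

static void reset_assert(volatile uint32_t *rst, unsigned int bit)
{
	/* mask set, value 0: drive the active-low reset line low */
	reg_write(rst, (uint32_t)1 << (bit + 16));
}

static void reset_deassert(volatile uint32_t *rst, unsigned int bit)
{
	uint32_t m = (uint32_t)1 << bit;

	/* mask and value set: release the reset line */
	reg_write(rst, (m << 16) | m);
}
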
diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.h b/drivers/clk/renesas/renesas-rzg2l-cpg.h
index 3948bdd8afc9..63695280ce8b 100644
--- a/drivers/clk/renesas/renesas-rzg2l-cpg.h
+++ b/drivers/clk/renesas/renesas-rzg2l-cpg.h
@@ -21,6 +21,7 @@
#define DDIV_PACK(offset, bitpos, size) \
(((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
#define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3)
+#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
#define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
/**
@@ -76,26 +77,40 @@ enum clk_types {
* @id: clock index in array containing all Core and Module Clocks
* @parent: id of parent clock
* @off: register offset
- * @onoff: ON/MON bits
- * @reset: reset bits
+ * @bit: ON/MON bit
*/
struct rzg2l_mod_clk {
const char *name;
unsigned int id;
unsigned int parent;
u16 off;
- u8 onoff;
- u8 reset;
+ u8 bit;
};
-#define DEF_MOD(_name, _id, _parent, _off, _onoff, _reset) \
- [_id] = { \
+#define DEF_MOD(_name, _id, _parent, _off, _bit) \
+ { \
.name = _name, \
- .id = MOD_CLK_BASE + _id, \
+ .id = MOD_CLK_BASE + (_id), \
.parent = (_parent), \
.off = (_off), \
- .onoff = (_onoff), \
- .reset = (_reset) \
+ .bit = (_bit), \
+ }
+
+/**
+ * struct rzg2l_reset - Reset definitions
+ *
+ * @off: register offset
+ * @bit: reset bit
+ */
+struct rzg2l_reset {
+ u16 off;
+ u8 bit;
+};
+
+#define DEF_RST(_id, _off, _bit) \
+ [_id] = { \
+ .off = (_off), \
+ .bit = (_bit) \
}
/**
@@ -126,6 +141,10 @@ struct rzg2l_cpg_info {
unsigned int num_mod_clks;
unsigned int num_hw_mod_clks;
+ /* Resets */
+ const struct rzg2l_reset *resets;
+ unsigned int num_resets;
+
/* Critical Module Clocks that should not be disabled */
const unsigned int *crit_mod_clks;
unsigned int num_crit_mod_clks;
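
Because DEF_RST() uses a designated array initializer, reset IDs index the table directly and any ID without a DEF_RST() entry stays zero-filled; that is why the xlate callback above can reject an entry whose .off is zero. A stand-alone model of that check, with illustrative offsets:

#include <stdint.h>
#include <stdio.h>

struct rst { uint16_t off; uint8_t bit; };

#define DEF_RST(_id, _off, _bit) [_id] = { .off = (_off), .bit = (_bit) }

static const struct rst resets[8] = {
	DEF_RST(0, 0x814, 0),
	DEF_RST(3, 0x884, 0),	/* IDs 1, 2 and 4..7 stay { 0, 0 } */
};

static int rst_valid(unsigned int id)
{
	return id < sizeof(resets) / sizeof(resets[0]) && resets[id].off;
}

int main(void)
{
	printf("%d %d\n", rst_valid(3), rst_valid(2));	/* prints: 1 0 */
	return 0;
}
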
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 182a4dbca095..c538a153ee82 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -942,8 +942,6 @@ static int __init longhaul_init(void)
return cpufreq_register_driver(&longhaul_driver);
case 10:
pr_err("Use acpi-cpufreq driver for VIA C7\n");
- default:
- ;
}
return -ENODEV;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 20d9bddbb985..394e6e1e9686 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -211,8 +211,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct sync_file *sync_file;
- struct dma_fence **fences, **nfences, **a_fences, **b_fences;
- int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
+ struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
+ int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
if (!sync_file)
@@ -236,7 +236,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* If a sync_file can only be created with sync_file_merge
* and sync_file_create, this is a reasonable assumption.
*/
- for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+ for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
struct dma_fence *pt_a = a_fences[i_a];
struct dma_fence *pt_b = b_fences[i_b];
@@ -277,15 +277,16 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
fences = nfences;
}
- if (sync_file_set_fence(sync_file, fences, i) < 0) {
- kfree(fences);
+ if (sync_file_set_fence(sync_file, fences, i) < 0)
goto err;
- }
strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
return sync_file;
err:
+ while (i)
+ dma_fence_put(fences[--i]);
+ kfree(fences);
fput(sync_file->file);
return NULL;
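
The sync_file fix makes the error path own the fence references: i counts how many references the merge loop has taken so far, so on failure exactly those are dropped before the array is freed, instead of leaking them when sync_file_set_fence() fails. The unwind idiom as a stand-alone sketch; get_ref()/put_ref() are hypothetical stand-ins for dma_fence_get()/dma_fence_put().

static int collect(void **out, void **in, int n,
		   void *(*get_ref)(void *), void (*put_ref)(void *))
{
	int i = 0;

	while (i < n) {
		out[i] = get_ref(in[i]);
		if (!out[i])
			goto err;
		i++;
	}
	return 0;

err:
	while (i)
		put_ref(out[--i]);	/* drop only what we took */
	return -1;
}
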
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 104ad420abbe..baab1ca9f621 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -618,6 +618,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
case IDMAC_SDC_1:
case IDMAC_IC_7:
ipu_channel_set_priority(ipu, channel, true);
+ break;
default:
break;
}
@@ -978,6 +979,7 @@ static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
case IDMAC_SDC_0:
case IDMAC_SDC_1:
n_desc = 4;
+ break;
default:
break;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index c1a69149c8bf..4a51fdbf5aa9 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -813,6 +813,7 @@ inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
case 16:
if (is_mpc8308)
return false;
+ break;
case 1:
case 2:
case 4:
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 96ad21869ba7..a35858610780 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4948,6 +4948,7 @@ static int setup_resources(struct udma_dev *ud)
ud->tchan_cnt),
ud->rchan_cnt - bitmap_weight(ud->rchan_map,
ud->rchan_cnt));
+ break;
default:
break;
}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 91164c5f0757..2fc4c3f91fd5 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -271,7 +271,7 @@ config EDAC_PND2
config EDAC_IGEN6
tristate "Intel client SoC Integrated MC"
depends on PCI && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
- depends on X64_64 && X86_MCE_INTEL
+ depends on X86_64 && X86_MCE_INTEL
help
Support for error detection and correction on the Intel
client SoC Integrated Memory Controller using In-Band ECC IP.
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 83166e02b191..00fe595a5bc8 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -46,9 +46,6 @@ static int ffa_device_probe(struct device *dev)
struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
struct ffa_device *ffa_dev = to_ffa_dev(dev);
- if (!ffa_device_match(dev, dev->driver))
- return -ENODEV;
-
return ffa_drv->probe(ffa_dev);
}
@@ -99,6 +96,9 @@ int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
{
int ret;
+ if (!driver->probe)
+ return -EINVAL;
+
driver->driver.bus = &ffa_bus_type;
driver->driver.name = driver->name;
driver->driver.owner = owner;
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index b1edb4b2e94a..c9fb56afbcb4 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -120,7 +120,7 @@
#define PACK_TARGET_INFO(s, r) \
(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
-/**
+/*
* FF-A specification mentions explicitly about '4K pages'. This should
* not be confused with the kernel PAGE_SIZE, which is the translation
* granule kernel is configured and may be one among 4K, 16K and 64K.
@@ -149,8 +149,10 @@ static const int ffa_linux_errmap[] = {
static inline int ffa_to_linux_errno(int errno)
{
- if (errno < FFA_RET_SUCCESS && errno >= -ARRAY_SIZE(ffa_linux_errmap))
- return ffa_linux_errmap[-errno];
+ int err_idx = -errno;
+
+ if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
+ return ffa_linux_errmap[err_idx];
return -EINVAL;
}
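
The errmap fixes here and in the SCMI hunks below share one shape: negate the firmware error once, then bounds-check the resulting index as a non-negative array subscript, which closes both the negative-index and the off-by-one hole of the old signed comparison. A stand-alone sketch with an illustrative map; the real tables live in the drivers above.

#include <errno.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* illustrative mapping only */
static const int errmap[] = { 0, -EINVAL, -EOPNOTSUPP, -ENOMEM, -EACCES };

static int fw_to_linux_errno(int fw_errno)
{
	int idx = -fw_errno;	/* firmware errors are 0 or negative */

	if (idx >= 0 && idx < (int)ARRAY_SIZE(errmap))
		return errmap[idx];
	return -EINVAL;		/* out of range: generic failure */
}
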
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 784cf0027da3..6c7e24935eca 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -104,11 +104,6 @@ static int scmi_dev_probe(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- const struct scmi_device_id *id;
-
- id = scmi_dev_match_id(scmi_dev, scmi_drv);
- if (!id)
- return -ENODEV;
if (!scmi_dev->handle)
return -EPROBE_DEFER;
@@ -139,6 +134,9 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
{
int retval;
+ if (!driver->probe)
+ return -EINVAL;
+
retval = scmi_protocol_device_request(driver->id_table);
if (retval)
return retval;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 66e5e694be7d..9b2e8d42a992 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -47,7 +47,6 @@ enum scmi_error_codes {
SCMI_ERR_GENERIC = -8, /* Generic Error */
SCMI_ERR_HARDWARE = -9, /* Hardware Error */
SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
- SCMI_ERR_MAX
};
/* List of all SCMI devices active in system */
@@ -166,8 +165,10 @@ static const int scmi_linux_errmap[] = {
static inline int scmi_to_linux_errno(int errno)
{
- if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
- return scmi_linux_errmap[-errno];
+ int err_idx = -errno;
+
+ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+ return scmi_linux_errmap[err_idx];
return -EIO;
}
@@ -1025,8 +1026,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
const struct scmi_desc *desc = sinfo->desc;
/* Pre-allocated messages, no more than what hdr.seq can support */
- if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
- dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+ if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
+ dev_err(dev,
+ "Invalid maximum messages %d, not in range [1 - %lu]\n",
desc->max_msg, MSG_TOKEN_MAX);
return -EINVAL;
}
@@ -1137,6 +1139,8 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
* @proto_id and @name: if device was still not existent it is created as a
* child of the specified SCMI instance @info and its transport properly
* initialized as usual.
+ *
+ * Return: A properly initialized scmi device, NULL otherwise.
*/
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index d860bebd984a..0efd20cd9d69 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1457,6 +1457,8 @@ static void scmi_devm_release_notifier(struct device *dev, void *res)
*
* Generic devres managed helper to register a notifier_block against a
* protocol event.
+ *
+ * Return: 0 on success
*/
static int scmi_devm_notifier_register(struct scmi_device *sdev,
u8 proto_id, u8 evt_id,
@@ -1523,6 +1525,8 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
* Generic devres managed helper to explicitly un-register a notifier_block
* against a protocol event, which was previously registered using the above
* @scmi_devm_notifier_register.
+ *
+ * Return: 0 on success
*/
static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
u8 proto_id, u8 evt_id,
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 2c88aa221559..308471586381 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -166,7 +166,8 @@ struct scmi_msg_sensor_reading_get {
struct scmi_resp_sensor_reading_complete {
__le32 id;
- __le64 readings;
+ __le32 readings_low;
+ __le32 readings_high;
};
struct scmi_sensor_reading_resp {
@@ -717,7 +718,8 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
resp = t->rx.buf;
if (le32_to_cpu(resp->id) == sensor_id)
- *value = get_unaligned_le64(&resp->readings);
+ *value =
+ get_unaligned_le64(&resp->readings_low);
else
ret = -EPROTO;
}
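
Splitting the reading into readings_low/readings_high keeps the wire layout identical (two consecutive little-endian 32-bit words) while making it explicit that the 64-bit value may not be naturally aligned; get_unaligned_le64() on the low word still reads the whole value. A stand-alone equivalent of that accessor, correct on any host endianness or alignment:

#include <stdint.h>

static uint64_t le64_from_le32_pair(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	/* p[0] is the least significant byte of readings_low */
	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}
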
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index db16b3e83694..cf62f43a03da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 3b8e1ee8c475..4fb15750b9bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1057,8 +1057,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
static int update_gpuvm_pte(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
- struct amdgpu_sync *sync,
- bool *table_freed)
+ struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_device *adev = entry->adev;
@@ -1069,7 +1068,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
return ret;
/* Update the page tables */
- ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
if (ret) {
pr_err("amdgpu_vm_bo_update failed\n");
return ret;
@@ -1081,8 +1080,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
static int map_bo_to_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync,
- bool no_update_pte,
- bool *table_freed)
+ bool no_update_pte)
{
int ret;
@@ -1099,7 +1097,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
if (no_update_pte)
return 0;
- ret = update_gpuvm_pte(mem, entry, sync, table_freed);
+ ret = update_gpuvm_pte(mem, entry, sync);
if (ret) {
pr_err("update_gpuvm_pte() failed\n");
goto update_gpuvm_pte_failed;
@@ -1393,8 +1391,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1597,8 +1594,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem,
- void *drm_priv, bool *table_freed)
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1686,7 +1682,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
entry->va, entry->va + bo_size, entry);
ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
- is_invalid_userptr, table_freed);
+ is_invalid_userptr);
if (ret) {
pr_err("Failed to map bo to gpuvm\n");
goto out_unreserve;
@@ -2136,7 +2132,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync);
if (ret) {
pr_err("%s: update PTE failed\n", __func__);
/* make sure this gets validated again */
@@ -2342,7 +2338,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {
pr_debug("Memory eviction: update PTE failed. Try again\n");
goto validate_map_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 76fe5b71e35d..30fa1f61e0e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
if (r)
return r;
@@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
@@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (bo_va == NULL)
continue;
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 71beb0db0125..abb928894eac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1168,6 +1168,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
+ {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b3404c43a911..d0d9bc445d7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -612,7 +612,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) {
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
goto error;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 32ce0e679dc7..83af307e97cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -278,6 +278,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
+static void amdgpu_restore_msix(struct amdgpu_device *adev)
+{
+ u16 ctrl;
+
+ pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
+ return;
+
+ /* VF FLR */
+ ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+ ctrl |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
/**
* amdgpu_irq_init - initialize interrupt handling
*
@@ -569,6 +584,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_restore_msix(adev);
+
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
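
amdgpu_restore_msix() above re-arms MSI-X after a VF function-level reset by toggling the enable bit in the capability's message control word: clearing then setting PCI_MSIX_FLAGS_ENABLE makes the function re-latch its (restored) vector table. A sketch of that toggle, with cfg_read16()/cfg_write16() as hypothetical stand-ins for pci_read_config_word()/pci_write_config_word():

#include <stdint.h>

#define PCI_MSIX_FLAGS_ENABLE 0x8000

static void restore_msix(uint16_t (*cfg_read16)(unsigned int off),
			 void (*cfg_write16)(unsigned int off, uint16_t v),
			 unsigned int msix_ctrl_off)
{
	uint16_t ctrl = cfg_read16(msix_ctrl_off);

	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;		/* MSI-X not in use, nothing to re-arm */

	cfg_write16(msix_ctrl_off, ctrl & ~PCI_MSIX_FLAGS_ENABLE);
	cfg_write16(msix_ctrl_off, ctrl | PCI_MSIX_FLAGS_ENABLE);
}
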
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c13b02caf8c3..fc66aca28594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -809,7 +809,7 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
- struct ras_query_if *info)
+ struct ras_query_if *info)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data = {0, 0, 0, NULL};
@@ -1043,17 +1043,32 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
return ret;
}
-/* get the total error counts on all IPs */
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
- unsigned long *ce_count,
- unsigned long *ue_count)
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable
+ * errors.
+ *
+ * If @ce_count or @ue_count is set, count and return the corresponding
+ * error counts in those integer pointers. Return 0 if the device
+ * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
+ */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
unsigned long ce, ue;
if (!adev->ras_enabled || !con)
- return;
+ return -EOPNOTSUPP;
+
+ /* Nothing to count when the caller requests no reporting. */
+ if (!ce_count && !ue_count)
+ return 0;
ce = 0;
ue = 0;
@@ -1061,9 +1076,11 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
struct ras_query_if info = {
.head = obj->head,
};
+ int res;
- if (amdgpu_ras_query_error_status(adev, &info))
- return;
+ res = amdgpu_ras_query_error_status(adev, &info);
+ if (res)
+ return res;
ce += info.ce_count;
ue += info.ue_count;
@@ -1074,6 +1091,8 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
if (ue_count)
*ue_count = ue;
+
+ return 0;
}
/* query/inject/cure end */
@@ -2137,9 +2156,10 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
/* Cache new values.
*/
- amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
- atomic_set(&con->ras_ce_count, ce_count);
- atomic_set(&con->ras_ue_count, ue_count);
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+ }
pm_runtime_mark_last_busy(dev->dev);
Out:
@@ -2312,9 +2332,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
/* Those are the cached values at init.
*/
- amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
- atomic_set(&con->ras_ce_count, ce_count);
- atomic_set(&con->ras_ue_count, ue_count);
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+ }
return 0;
cleanup:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 256cea5d34f2..b504ed8c9b50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -490,9 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
- unsigned long *ce_count,
- unsigned long *ue_count);
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count);
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 79cfa2d68487..078c068937fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1758,7 +1758,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
r = vm->update_funcs->commit(&params, fence);
if (table_freed)
- *table_freed = *table_freed || params.table_freed;
+ *table_freed = params.table_freed;
error_unlock:
amdgpu_vm_eviction_unlock(vm);
@@ -1816,7 +1816,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
* @adev: amdgpu_device pointer
* @bo_va: requested BO and VM object
* @clear: if true clear the entries
- * @table_freed: return true if page table is freed
*
* Fill in the page table entries for @bo_va.
*
@@ -1824,7 +1823,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
* 0 for success, -EINVAL for failure.
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
- bool clear, bool *table_freed)
+ bool clear)
{
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1903,7 +1902,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
resv, mapping->start,
mapping->last, update_flags,
mapping->offset, mem,
- pages_addr, last_update, table_freed);
+ pages_addr, last_update, NULL);
if (r)
return r;
}
@@ -2155,7 +2154,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
/* Per VM BOs never need to bo cleared in the page tables */
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
}
@@ -2174,7 +2173,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
else
clear = true;
- r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index ddb85a85cbba..f8fa653d4da7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -406,7 +406,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence **fence, bool *free_table);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- bool clear, bool *table_freed);
+ bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 33324427b555..7e0d8c092c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -766,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1;
+ adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 3ee481557fc9..ff2307d7ee0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -252,7 +252,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reseted by
* the VF FLR.
*/
- if (!down_read_trylock(&adev->reset_sem))
+ if (!down_write_trylock(&adev->reset_sem))
return;
amdgpu_virt_fini_data_exchange(adev);
@@ -268,7 +268,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
flr_done:
atomic_set(&adev->in_gpu_reset, 0);
- up_read(&adev->reset_sem);
+ up_write(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 48e588d3c409..9f7aac435d69 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -273,7 +273,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reseted by
* the VF FLR.
*/
- if (!down_read_trylock(&adev->reset_sem))
+ if (!down_write_trylock(&adev->reset_sem))
return;
amdgpu_virt_fini_data_exchange(adev);
@@ -289,7 +289,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
flr_done:
atomic_set(&adev->in_gpu_reset, 0);
- up_read(&adev->reset_sem);
+ up_write(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 67541c30327a..e48acdd03c1a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1393,7 +1393,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
long err = 0;
int i;
uint32_t *devices_arr = NULL;
- bool table_freed = false;
dev = kfd_device_by_id(GET_GPU_ID(args->handle));
if (!dev)
@@ -1451,8 +1450,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
goto get_mem_obj_from_handle_failed;
}
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- peer->kgd, (struct kgd_mem *)mem,
- peer_pdd->drm_priv, &table_freed);
+ peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
if (err) {
pr_err("Failed to map to gpu %d/%d\n",
i, args->n_devices);
@@ -1470,17 +1468,16 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
}
/* Flush TLBs after waiting for the page table updates to complete */
- if (table_freed) {
- for (i = 0; i < args->n_devices; i++) {
- peer = kfd_device_by_id(devices_arr[i]);
- if (WARN_ON_ONCE(!peer))
- continue;
- peer_pdd = kfd_get_process_device_data(peer, p);
- if (WARN_ON_ONCE(!peer_pdd))
- continue;
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
- }
+ for (i = 0; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (WARN_ON_ONCE(!peer))
+ continue;
+ peer_pdd = kfd_get_process_device_data(peer, p);
+ if (WARN_ON_ONCE(!peer_pdd))
+ continue;
+ kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
}
+
kfree(devices_arr);
return err;
@@ -1568,27 +1565,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
-
- err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
- if (err) {
- pr_debug("Sync memory failed, wait interrupted by user signal\n");
- goto sync_memory_failed;
- }
-
- /* Flush TLBs after waiting for the page table updates to complete */
- for (i = 0; i < args->n_devices; i++) {
- peer = kfd_device_by_id(devices_arr[i]);
- if (WARN_ON_ONCE(!peer))
- continue;
- peer_pdd = kfd_get_process_device_data(peer, p);
- if (WARN_ON_ONCE(!peer_pdd))
- continue;
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
- }
-
kfree(devices_arr);
+ mutex_unlock(&p->mutex);
+
return 0;
bind_process_to_device_failed:
@@ -1596,7 +1576,6 @@ get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 21ec8a18cad2..8a2c6fc438c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -714,8 +714,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
if (err)
goto err_alloc_mem;
- err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
- pdd->drm_priv, NULL);
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
if (err)
goto err_map_mem;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9a71d8919bd6..c7b364e4a287 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2375,21 +2375,27 @@ static bool svm_range_skip_recover(struct svm_range *prange)
static void
svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
- struct svm_range *prange, int32_t gpuidx)
+ int32_t gpuidx)
{
struct kfd_process_device *pdd;
- if (gpuidx == MAX_GPU_INSTANCE)
- /* fault is on different page of same range
- * or fault is skipped to recover later
- */
- pdd = svm_range_get_pdd_by_adev(prange, adev);
- else
- /* fault recovered
- * or fault cannot recover because GPU no access on the range
- */
- pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+ /* fault is on different page of same range
+ * or fault is skipped to recover later
+ * or fault is on invalid virtual address
+ */
+ if (gpuidx == MAX_GPU_INSTANCE) {
+ uint32_t gpuid;
+ int r;
+ r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+ if (r < 0)
+ return;
+ }
+
+ /* fault is recovered
+ * or fault cannot be recovered because the GPU has no access to the range
+ */
+ pdd = kfd_process_device_from_gpuidx(p, gpuidx);
if (pdd)
WRITE_ONCE(pdd->faults, pdd->faults + 1);
}
@@ -2525,7 +2531,7 @@ out_unlock_svms:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
- svm_range_count_fault(adev, p, prange, gpuidx);
+ svm_range_count_fault(adev, p, gpuidx);
mmput(mm);
out:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 01e1062dc235..d3a2a5ff57e9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9191,7 +9191,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* restore the backlight level */
- if (dm->backlight_dev)
+ if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
#endif
/*
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 66db5e988bc1..dad4a4c18bcf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -31,8 +31,8 @@
#include "dcn31_smu.h"
#include "yellow_carp_offset.h"
-#include "mp/mp_13_0_1_offset.h"
-#include "mp/mp_13_0_1_sh_mask.h"
+#include "mp/mp_13_0_2_offset.h"
+#include "mp/mp_13_0_2_sh_mask.h"
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index b8832bdde2bc..6da226bf11d5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1620,11 +1620,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
{
enum dc_status status = DC_OK;
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- status = configure_lttpr_mode_non_transparent(link, lt_settings);
- else
+ if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
status = configure_lttpr_mode_transparent(link);
+ else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ status = configure_lttpr_mode_non_transparent(link, lt_settings);
+
return status;
}
@@ -1784,7 +1785,6 @@ bool perform_link_training_with_retries(
link_enc = stream->link_enc;
else
link_enc = link->link_enc;
- ASSERT(link_enc);
/* We need to do this before the link training to ensure the idle pattern in SST
* mode will be sent right after the link training
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index fc1fc1a4bf8b..836864a5a5dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -390,7 +390,7 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
- if (!is_hdmi_tmds)
+ if (!is_hdmi_tmds && !is_dp)
return;
if (is_hdmi_tmds)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h
deleted file mode 100644
index dfacc6b5d89d..000000000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#ifndef _mp_13_0_1_OFFSET_HEADER
-#define _mp_13_0_1_OFFSET_HEADER
-
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-// base address: 0x0
-#define regMP0_SMN_C2PMSG_32 0x0060
-#define regMP0_SMN_C2PMSG_32_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_33 0x0061
-#define regMP0_SMN_C2PMSG_33_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_34 0x0062
-#define regMP0_SMN_C2PMSG_34_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_35 0x0063
-#define regMP0_SMN_C2PMSG_35_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_36 0x0064
-#define regMP0_SMN_C2PMSG_36_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_37 0x0065
-#define regMP0_SMN_C2PMSG_37_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_38 0x0066
-#define regMP0_SMN_C2PMSG_38_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_39 0x0067
-#define regMP0_SMN_C2PMSG_39_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_40 0x0068
-#define regMP0_SMN_C2PMSG_40_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_41 0x0069
-#define regMP0_SMN_C2PMSG_41_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_42 0x006a
-#define regMP0_SMN_C2PMSG_42_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_43 0x006b
-#define regMP0_SMN_C2PMSG_43_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_44 0x006c
-#define regMP0_SMN_C2PMSG_44_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_45 0x006d
-#define regMP0_SMN_C2PMSG_45_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_46 0x006e
-#define regMP0_SMN_C2PMSG_46_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_47 0x006f
-#define regMP0_SMN_C2PMSG_47_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_48 0x0070
-#define regMP0_SMN_C2PMSG_48_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_49 0x0071
-#define regMP0_SMN_C2PMSG_49_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_50 0x0072
-#define regMP0_SMN_C2PMSG_50_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_51 0x0073
-#define regMP0_SMN_C2PMSG_51_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_52 0x0074
-#define regMP0_SMN_C2PMSG_52_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_53 0x0075
-#define regMP0_SMN_C2PMSG_53_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_54 0x0076
-#define regMP0_SMN_C2PMSG_54_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_55 0x0077
-#define regMP0_SMN_C2PMSG_55_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_56 0x0078
-#define regMP0_SMN_C2PMSG_56_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_57 0x0079
-#define regMP0_SMN_C2PMSG_57_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_58 0x007a
-#define regMP0_SMN_C2PMSG_58_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_59 0x007b
-#define regMP0_SMN_C2PMSG_59_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_60 0x007c
-#define regMP0_SMN_C2PMSG_60_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_61 0x007d
-#define regMP0_SMN_C2PMSG_61_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_62 0x007e
-#define regMP0_SMN_C2PMSG_62_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_63 0x007f
-#define regMP0_SMN_C2PMSG_63_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_64 0x0080
-#define regMP0_SMN_C2PMSG_64_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_65 0x0081
-#define regMP0_SMN_C2PMSG_65_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_66 0x0082
-#define regMP0_SMN_C2PMSG_66_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_67 0x0083
-#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_68 0x0084
-#define regMP0_SMN_C2PMSG_68_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_69 0x0085
-#define regMP0_SMN_C2PMSG_69_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_70 0x0086
-#define regMP0_SMN_C2PMSG_70_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_71 0x0087
-#define regMP0_SMN_C2PMSG_71_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_72 0x0088
-#define regMP0_SMN_C2PMSG_72_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_73 0x0089
-#define regMP0_SMN_C2PMSG_73_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_74 0x008a
-#define regMP0_SMN_C2PMSG_74_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_75 0x008b
-#define regMP0_SMN_C2PMSG_75_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_76 0x008c
-#define regMP0_SMN_C2PMSG_76_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_77 0x008d
-#define regMP0_SMN_C2PMSG_77_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_78 0x008e
-#define regMP0_SMN_C2PMSG_78_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_79 0x008f
-#define regMP0_SMN_C2PMSG_79_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_80 0x0090
-#define regMP0_SMN_C2PMSG_80_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_81 0x0091
-#define regMP0_SMN_C2PMSG_81_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_82 0x0092
-#define regMP0_SMN_C2PMSG_82_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_83 0x0093
-#define regMP0_SMN_C2PMSG_83_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_84 0x0094
-#define regMP0_SMN_C2PMSG_84_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_85 0x0095
-#define regMP0_SMN_C2PMSG_85_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_86 0x0096
-#define regMP0_SMN_C2PMSG_86_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_87 0x0097
-#define regMP0_SMN_C2PMSG_87_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_88 0x0098
-#define regMP0_SMN_C2PMSG_88_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_89 0x0099
-#define regMP0_SMN_C2PMSG_89_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_90 0x009a
-#define regMP0_SMN_C2PMSG_90_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_91 0x009b
-#define regMP0_SMN_C2PMSG_91_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_92 0x009c
-#define regMP0_SMN_C2PMSG_92_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_93 0x009d
-#define regMP0_SMN_C2PMSG_93_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_94 0x009e
-#define regMP0_SMN_C2PMSG_94_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_95 0x009f
-#define regMP0_SMN_C2PMSG_95_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_96 0x00a0
-#define regMP0_SMN_C2PMSG_96_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_97 0x00a1
-#define regMP0_SMN_C2PMSG_97_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_98 0x00a2
-#define regMP0_SMN_C2PMSG_98_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_99 0x00a3
-#define regMP0_SMN_C2PMSG_99_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_100 0x00a4
-#define regMP0_SMN_C2PMSG_100_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_101 0x00a5
-#define regMP0_SMN_C2PMSG_101_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_102 0x00a6
-#define regMP0_SMN_C2PMSG_102_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_103 0x00a7
-#define regMP0_SMN_C2PMSG_103_BASE_IDX 0
-#define regMP0_SMN_IH_CREDIT 0x00c1
-#define regMP0_SMN_IH_CREDIT_BASE_IDX 0
-#define regMP0_SMN_IH_SW_INT 0x00c2
-#define regMP0_SMN_IH_SW_INT_BASE_IDX 0
-#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3
-#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-// base address: 0x0
-#define regMP1_SMN_C2PMSG_32 0x0260
-#define regMP1_SMN_C2PMSG_32_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_33 0x0261
-#define regMP1_SMN_C2PMSG_33_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_34 0x0262
-#define regMP1_SMN_C2PMSG_34_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_35 0x0263
-#define regMP1_SMN_C2PMSG_35_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_36 0x0264
-#define regMP1_SMN_C2PMSG_36_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_37 0x0265
-#define regMP1_SMN_C2PMSG_37_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_38 0x0266
-#define regMP1_SMN_C2PMSG_38_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_39 0x0267
-#define regMP1_SMN_C2PMSG_39_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_40 0x0268
-#define regMP1_SMN_C2PMSG_40_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_41 0x0269
-#define regMP1_SMN_C2PMSG_41_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_42 0x026a
-#define regMP1_SMN_C2PMSG_42_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_43 0x026b
-#define regMP1_SMN_C2PMSG_43_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_44 0x026c
-#define regMP1_SMN_C2PMSG_44_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_45 0x026d
-#define regMP1_SMN_C2PMSG_45_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_46 0x026e
-#define regMP1_SMN_C2PMSG_46_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_47 0x026f
-#define regMP1_SMN_C2PMSG_47_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_48 0x0270
-#define regMP1_SMN_C2PMSG_48_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_49 0x0271
-#define regMP1_SMN_C2PMSG_49_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_50 0x0272
-#define regMP1_SMN_C2PMSG_50_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_51 0x0273
-#define regMP1_SMN_C2PMSG_51_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_52 0x0274
-#define regMP1_SMN_C2PMSG_52_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_53 0x0275
-#define regMP1_SMN_C2PMSG_53_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_54 0x0276
-#define regMP1_SMN_C2PMSG_54_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_55 0x0277
-#define regMP1_SMN_C2PMSG_55_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_56 0x0278
-#define regMP1_SMN_C2PMSG_56_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_57 0x0279
-#define regMP1_SMN_C2PMSG_57_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_58 0x027a
-#define regMP1_SMN_C2PMSG_58_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_59 0x027b
-#define regMP1_SMN_C2PMSG_59_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_60 0x027c
-#define regMP1_SMN_C2PMSG_60_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_61 0x027d
-#define regMP1_SMN_C2PMSG_61_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_62 0x027e
-#define regMP1_SMN_C2PMSG_62_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_63 0x027f
-#define regMP1_SMN_C2PMSG_63_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_64 0x0280
-#define regMP1_SMN_C2PMSG_64_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_65 0x0281
-#define regMP1_SMN_C2PMSG_65_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_66 0x0282
-#define regMP1_SMN_C2PMSG_66_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_67 0x0283
-#define regMP1_SMN_C2PMSG_67_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_68 0x0284
-#define regMP1_SMN_C2PMSG_68_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_69 0x0285
-#define regMP1_SMN_C2PMSG_69_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_70 0x0286
-#define regMP1_SMN_C2PMSG_70_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_71 0x0287
-#define regMP1_SMN_C2PMSG_71_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_72 0x0288
-#define regMP1_SMN_C2PMSG_72_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_73 0x0289
-#define regMP1_SMN_C2PMSG_73_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_74 0x028a
-#define regMP1_SMN_C2PMSG_74_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_75 0x028b
-#define regMP1_SMN_C2PMSG_75_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_76 0x028c
-#define regMP1_SMN_C2PMSG_76_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_77 0x028d
-#define regMP1_SMN_C2PMSG_77_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_78 0x028e
-#define regMP1_SMN_C2PMSG_78_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_79 0x028f
-#define regMP1_SMN_C2PMSG_79_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_80 0x0290
-#define regMP1_SMN_C2PMSG_80_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_81 0x0291
-#define regMP1_SMN_C2PMSG_81_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_82 0x0292
-#define regMP1_SMN_C2PMSG_82_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_83 0x0293
-#define regMP1_SMN_C2PMSG_83_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_84 0x0294
-#define regMP1_SMN_C2PMSG_84_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_85 0x0295
-#define regMP1_SMN_C2PMSG_85_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_86 0x0296
-#define regMP1_SMN_C2PMSG_86_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_87 0x0297
-#define regMP1_SMN_C2PMSG_87_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_88 0x0298
-#define regMP1_SMN_C2PMSG_88_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_89 0x0299
-#define regMP1_SMN_C2PMSG_89_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_90 0x029a
-#define regMP1_SMN_C2PMSG_90_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_91 0x029b
-#define regMP1_SMN_C2PMSG_91_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_92 0x029c
-#define regMP1_SMN_C2PMSG_92_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_93 0x029d
-#define regMP1_SMN_C2PMSG_93_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_94 0x029e
-#define regMP1_SMN_C2PMSG_94_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_95 0x029f
-#define regMP1_SMN_C2PMSG_95_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_96 0x02a0
-#define regMP1_SMN_C2PMSG_96_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_97 0x02a1
-#define regMP1_SMN_C2PMSG_97_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_98 0x02a2
-#define regMP1_SMN_C2PMSG_98_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_99 0x02a3
-#define regMP1_SMN_C2PMSG_99_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_100 0x02a4
-#define regMP1_SMN_C2PMSG_100_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_101 0x02a5
-#define regMP1_SMN_C2PMSG_101_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_102 0x02a6
-#define regMP1_SMN_C2PMSG_102_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_103 0x02a7
-#define regMP1_SMN_C2PMSG_103_BASE_IDX 0
-#define regMP1_SMN_IH_CREDIT 0x02c1
-#define regMP1_SMN_IH_CREDIT_BASE_IDX 0
-#define regMP1_SMN_IH_SW_INT 0x02c2
-#define regMP1_SMN_IH_SW_INT_BASE_IDX 0
-#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3
-#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
-#define regMP1_SMN_FPS_CNT 0x02c4
-#define regMP1_SMN_FPS_CNT_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH0 0x0340
-#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH1 0x0341
-#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH2 0x0342
-#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH3 0x0343
-#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH4 0x0344
-#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH5 0x0345
-#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH6 0x0346
-#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH7 0x0347
-#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h
deleted file mode 100644
index 2d5e8b58e693..000000000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#ifndef _mp_13_0_1_SH_MASK_HEADER
-#define _mp_13_0_1_SH_MASK_HEADER
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-//MP0_SMN_C2PMSG_32
-#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_33
-#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_34
-#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_35
-#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_36
-#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_37
-#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_38
-#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_39
-#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_40
-#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_41
-#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_42
-#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_43
-#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_44
-#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_45
-#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_46
-#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_47
-#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_48
-#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_49
-#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_50
-#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_51
-#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_52
-#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_53
-#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_54
-#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_55
-#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_56
-#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_57
-#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_58
-#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_59
-#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_60
-#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_61
-#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_62
-#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_63
-#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_64
-#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_65
-#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_66
-#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_67
-#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_68
-#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_69
-#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_70
-#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_71
-#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_72
-#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_73
-#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_74
-#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_75
-#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_76
-#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_77
-#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_78
-#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_79
-#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_80
-#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_81
-#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_82
-#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_83
-#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_84
-#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_85
-#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_86
-#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_87
-#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_88
-#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_89
-#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_90
-#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_91
-#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_92
-#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_93
-#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_94
-#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_95
-#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_96
-#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_97
-#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_98
-#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_99
-#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_100
-#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_101
-#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_102
-#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_103
-#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_IH_CREDIT
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP0_SMN_IH_SW_INT
-#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
-#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
-#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
-#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
-//MP0_SMN_IH_SW_INT_CTRL
-#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
-#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
-#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
-#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
-
-
-// addressBlock: mp_SmuMp1Pub_CruDec
-//MP1_FIRMWARE_FLAGS
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
-#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
-#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-//MP1_SMN_C2PMSG_32
-#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_33
-#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_34
-#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_35
-#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_36
-#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_37
-#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_38
-#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_39
-#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_40
-#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_41
-#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_42
-#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_43
-#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_44
-#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_45
-#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_46
-#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_47
-#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_48
-#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_49
-#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_50
-#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_51
-#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_52
-#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_53
-#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_54
-#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_55
-#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_56
-#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_57
-#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_58
-#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_59
-#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_60
-#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_61
-#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_62
-#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_63
-#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_64
-#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_65
-#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_66
-#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_67
-#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_68
-#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_69
-#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_70
-#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_71
-#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_72
-#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_73
-#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_74
-#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_75
-#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_76
-#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_77
-#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_78
-#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_79
-#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_80
-#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_81
-#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_82
-#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_83
-#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_84
-#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_85
-#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_86
-#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_87
-#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_88
-#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_89
-#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_90
-#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_91
-#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_92
-#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_93
-#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_94
-#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_95
-#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_96
-#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_97
-#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_98
-#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_99
-#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_100
-#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_101
-#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_102
-#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_103
-#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_IH_CREDIT
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP1_SMN_IH_SW_INT
-#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
-#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
-#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
-#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
-//MP1_SMN_IH_SW_INT_CTRL
-#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
-#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
-#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
-#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
-//MP1_SMN_FPS_CNT
-#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
-#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH0
-#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH1
-#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH2
-#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH3
-#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH4
-#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH5
-#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH6
-#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH7
-#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index 6119a36b2cba..3fea2430dec0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -26,6 +26,7 @@
#include "amdgpu_smu.h"
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03
#define SMU13_DRIVER_IF_VERSION_ALDE 0x07
/* MP Apertures */
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h
deleted file mode 100644
index b6c976a4d578..000000000000
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __SMU_V13_0_1_H__
-#define __SMU_V13_0_1_H__
-
-#include "amdgpu_smu.h"
-
-#define SMU13_0_1_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP 0x3
-
-/* MP Apertures */
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-/* address block */
-#define smnMP1_FIRMWARE_FLAGS 0x3010024
-
-
-#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
-
-int smu_v13_0_1_check_fw_status(struct smu_context *smu);
-
-int smu_v13_0_1_check_fw_version(struct smu_context *smu);
-
-int smu_v13_0_1_fini_smc_tables(struct smu_context *smu);
-
-int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu);
-
-int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu);
-
-int smu_v13_0_1_set_driver_table_location(struct smu_context *smu);
-
-int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable);
-#endif
-#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 388c5cb5c647..0a5d46ac9ccd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1528,6 +1528,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
if (amdgpu_runtime_pm == 2)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 9b3a8503f5cd..d4c4c495762c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU13_MGR = smu_v13_0.o aldebaran_ppt.o smu_v13_0_1.o yellow_carp_ppt.o
+SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a3dc7194aaf8..a421ba85bd6d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -210,6 +210,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case CHIP_ALDEBARAN:
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
break;
+ case CHIP_YELLOW_CARP:
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
+ break;
default:
dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
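The new CHIP_YELLOW_CARP case routes the ASIC through the common smu_v13_0 version check, which decodes the packed 32-bit firmware version into major/minor/debug fields before comparing interface versions. A minimal standalone sketch of that decode; the field layout mirrors the shifts in the deleted smu_v13_0_1.c further down, and the sample value is hypothetical:

	#include <stdio.h>

	int main(void)
	{
		unsigned int smu_version = 0x003b2a01;	/* hypothetical fw-reported value */
		unsigned int smu_major = (smu_version >> 16) & 0xffff;
		unsigned int smu_minor = (smu_version >> 8) & 0xff;
		unsigned int smu_debug = smu_version & 0xff;

		/* prints: smu fw version = 0x003b2a01 (59.42.1) */
		printf("smu fw version = 0x%08x (%u.%u.%u)\n",
		       smu_version, smu_major, smu_minor, smu_debug);
		return 0;
	}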
@@ -694,6 +697,27 @@ failed:
return ret;
}
+int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (adev->asic_type) {
+ case CHIP_YELLOW_CARP:
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+ if (enable)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ else
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c
deleted file mode 100644
index 61917b49f2bf..000000000000
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-//#include <linux/reboot.h>
-
-#define SWSMU_CODE_LAYER_L3
-
-#include "amdgpu.h"
-#include "amdgpu_smu.h"
-#include "smu_v13_0_1.h"
-#include "soc15_common.h"
-#include "smu_cmn.h"
-#include "atomfirmware.h"
-#include "amdgpu_atomfirmware.h"
-#include "amdgpu_atombios.h"
-#include "atom.h"
-
-#include "asic_reg/mp/mp_13_0_1_offset.h"
-#include "asic_reg/mp/mp_13_0_1_sh_mask.h"
-
-/*
- * DO NOT use these for err/warn/info/debug messages.
- * Use dev_err, dev_warn, dev_info and dev_dbg instead.
- * They are more MGPU friendly.
- */
-#undef pr_err
-#undef pr_warn
-#undef pr_info
-#undef pr_debug
-
-int smu_v13_0_1_check_fw_status(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t mp1_fw_flags;
-
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
-
- if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
- return 0;
-
- return -EIO;
-}
-
-int smu_v13_0_1_check_fw_version(struct smu_context *smu)
-{
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint16_t smu_major;
- uint8_t smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_major = (smu_version >> 16) & 0xffff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
-
- switch (smu->adev->asic_type) {
- case CHIP_YELLOW_CARP:
- smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP;
- break;
-
- default:
- dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
- smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_INV;
- break;
- }
-
- dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
- smu_version, smu_major, smu_minor, smu_debug);
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a warning message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
- }
-
- return ret;
-}
-
-int smu_v13_0_1_fini_smc_tables(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
-
- kfree(smu_table->clocks_table);
- smu_table->clocks_table = NULL;
-
- kfree(smu_table->metrics_table);
- smu_table->metrics_table = NULL;
-
- kfree(smu_table->watermarks_table);
- smu_table->watermarks_table = NULL;
-
- return 0;
-}
-
-static int smu_v13_0_1_atom_get_smu_clockinfo(struct amdgpu_device *adev,
- uint8_t clk_id,
- uint8_t syspll_id,
- uint32_t *clk_freq)
-{
- struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
- struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
- int ret, index;
-
- input.clk_id = clk_id;
- input.syspll_id = syspll_id;
- input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
- index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- getsmuclockinfo);
-
- ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
- if (ret)
- return -EINVAL;
-
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
- *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
-
- return 0;
-}
-
-int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu)
-{
- int ret, index;
- uint16_t size;
- uint8_t frev, crev;
- struct atom_common_table_header *header;
- struct atom_firmware_info_v3_4 *v_3_4;
- struct atom_firmware_info_v3_3 *v_3_3;
- struct atom_firmware_info_v3_1 *v_3_1;
-
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- firmwareinfo);
-
- ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
- (uint8_t **)&header);
- if (ret)
- return ret;
-
- if (header->format_revision != 3) {
- dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n");
- return -EINVAL;
- }
-
- switch (header->content_revision) {
- case 0:
- case 1:
- case 2:
- v_3_1 = (struct atom_firmware_info_v3_1 *)header;
- smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
- smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
- smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
- smu->smu_table.boot_values.socclk = 0;
- smu->smu_table.boot_values.dcefclk = 0;
- smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
- smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
- smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
- smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
- smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
- break;
- case 3:
- v_3_3 = (struct atom_firmware_info_v3_3 *)header;
- smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
- smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
- smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
- smu->smu_table.boot_values.socclk = 0;
- smu->smu_table.boot_values.dcefclk = 0;
- smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
- smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
- smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
- smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
- smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
- break;
- case 4:
- default:
- v_3_4 = (struct atom_firmware_info_v3_4 *)header;
- smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
- smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
- smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
- smu->smu_table.boot_values.socclk = 0;
- smu->smu_table.boot_values.dcefclk = 0;
- smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
- smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
- smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
- smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
- smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
- break;
- }
-
- smu->smu_table.boot_values.format_revision = header->format_revision;
- smu->smu_table.boot_values.content_revision = header->content_revision;
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.socclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.dcefclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_ECLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.eclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_VCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.vclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_DCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.dclk);
-
- if ((smu->smu_table.boot_values.format_revision == 3) &&
- (smu->smu_table.boot_values.content_revision >= 2))
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
- (uint8_t)SMU11_SYSPLL1_2_ID,
- &smu->smu_table.boot_values.fclk);
-
- return 0;
-}
-
-int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
-
- return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
-}
-
-int smu_v13_0_1_set_driver_table_location(struct smu_context *smu)
-{
- struct smu_table *driver_table = &smu->smu_table.driver_table;
- int ret = 0;
-
- if (!driver_table->mc_address)
- return 0;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address),
- NULL);
-
- if (ret)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address),
- NULL);
-
- return ret;
-}
-
-int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable)
-{
- int ret = 0;
- struct amdgpu_device *adev = smu->adev;
-
- switch (adev->asic_type) {
- case CHIP_YELLOW_CARP:
- if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
- return 0;
- if (enable)
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
- else
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
- break;
- default:
- break;
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 18a1ffdca227..0cfeb9fc7c03 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -25,7 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_v13_0_1.h"
+#include "smu_v13_0.h"
#include "smu13_driver_if_yellow_carp.h"
#include "yellow_carp_ppt.h"
#include "smu_v13_0_1_ppsmc.h"
@@ -186,6 +186,22 @@ err0_out:
return -ENOMEM;
}
+static int yellow_carp_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ kfree(smu_table->clocks_table);
+ smu_table->clocks_table = NULL;
+
+ kfree(smu_table->metrics_table);
+ smu_table->metrics_table = NULL;
+
+ kfree(smu_table->watermarks_table);
+ smu_table->watermarks_table = NULL;
+
+ return 0;
+}
+
static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -282,13 +298,9 @@ static int yellow_carp_mode_reset(struct smu_context *smu, int type)
if (index < 0)
return index == -EACCES ? 0 : index;
- mutex_lock(&smu->message_lock);
-
- ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
-
- mutex_unlock(&smu->message_lock);
-
- mdelay(10);
+ ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to mode reset!\n");
return ret;
}
@@ -659,6 +671,13 @@ static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_1);
}
+static int yellow_carp_set_default_dpm_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
+}
+
static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
@@ -1203,17 +1222,17 @@ static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
}
static const struct pptable_funcs yellow_carp_ppt_funcs = {
- .check_fw_status = smu_v13_0_1_check_fw_status,
- .check_fw_version = smu_v13_0_1_check_fw_version,
+ .check_fw_status = smu_v13_0_check_fw_status,
+ .check_fw_version = smu_v13_0_check_fw_version,
.init_smc_tables = yellow_carp_init_smc_tables,
- .fini_smc_tables = smu_v13_0_1_fini_smc_tables,
- .get_vbios_bootup_values = smu_v13_0_1_get_vbios_bootup_values,
+ .fini_smc_tables = yellow_carp_fini_smc_tables,
+ .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
.system_features_control = yellow_carp_system_features_control,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
.send_smc_msg = smu_cmn_send_smc_msg,
.dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable,
- .set_default_dpm_table = smu_v13_0_1_set_default_dpm_tables,
+ .set_default_dpm_table = yellow_carp_set_default_dpm_tables,
.read_sensor = yellow_carp_read_sensor,
.is_dpm_running = yellow_carp_is_dpm_running,
.set_watermarks_table = yellow_carp_set_watermarks_table,
@@ -1222,8 +1241,8 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
.get_gpu_metrics = yellow_carp_get_gpu_metrics,
.get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
- .set_driver_table_location = smu_v13_0_1_set_driver_table_location,
- .gfx_off_control = smu_v13_0_1_gfx_off_control,
+ .set_driver_table_location = smu_v13_0_set_driver_table_location,
+ .gfx_off_control = smu_v13_0_gfx_off_control,
.post_init = yellow_carp_post_smu_init,
.mode2_reset = yellow_carp_mode2_reset,
.get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
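The rewired pptable_funcs table above is the heart of the series: yellow carp now points at the shared smu_v13_0_* helpers, plus two small local copies, instead of the deleted smu_v13_0_1 layer. A minimal sketch of the ops-table pattern being consolidated, with hypothetical names standing in for the real callbacks:

	#include <stdio.h>

	struct ppt_funcs {			/* table of per-ASIC callbacks */
		int (*check_fw_status)(void);
		int (*gfx_off_control)(int enable);
	};

	static int common_check_fw_status(void) { return 0; }
	static int common_gfx_off_control(int enable)
	{
		printf("gfxoff %s\n", enable ? "allowed" : "disallowed");
		return 0;
	}

	/* One ASIC's table: shared helpers wherever possible, local
	 * code only where behaviour really differs. */
	static const struct ppt_funcs yellow_carp_funcs = {
		.check_fw_status = common_check_fw_status,
		.gfx_off_control = common_gfx_off_control,
	};

	int main(void)
	{
		if (!yellow_carp_funcs.check_fw_status())
			yellow_carp_funcs.gfx_off_control(1);
		return 0;
	}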
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f4fb68e8955a..e382b7f2353b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -62,6 +62,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
+ return;
case __I915_MADV_PURGED:
return;
}
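Here the old fall-through from I915_MADV_DONTNEED happened to land on a bare return, so the added return leaves behaviour unchanged; the point is that each non-empty case now terminates itself, so future code in the next case cannot be reached by accident. A minimal standalone illustration of that rule:

	#include <stdio.h>

	enum state { DONTNEED, PURGED };

	static void handle(enum state s)
	{
		switch (s) {
		case DONTNEED:
			puts("truncate");
			return;		/* explicit: nothing below may run */
		case PURGED:
			return;		/* inserting work here is now safe */
		}
	}

	int main(void) { handle(DONTNEED); return 0; }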
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 21c8b7350b7a..da4f5eb43ac2 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -303,10 +303,7 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
__i915_gem_object_pin_pages(pt->base);
i915_gem_object_make_unshrinkable(pt->base);
- if (lvl ||
- gen8_pt_count(*start, end) < I915_PDES ||
- intel_vgpu_active(vm->i915))
- fill_px(pt, vm->scratch[lvl]->encode);
+ fill_px(pt, vm->scratch[lvl]->encode);
spin_lock(&pd->lock);
if (likely(!pd->entry[idx])) {
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index cac7f3f44642..f8948de72036 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -348,7 +348,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
if (intel_has_pending_fb_unpin(ggtt->vm.i915))
return ERR_PTR(-EAGAIN);
- return ERR_PTR(-EDEADLK);
+ return ERR_PTR(-ENOBUFS);
}
int __i915_vma_pin_fence(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 141178754231..1e8a971a86f2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1169,7 +1169,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
case MSM_BO_CACHED_COHERENT:
if (priv->has_cached_coherent)
break;
- /* fallthrough */
+ fallthrough;
default:
DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
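The /* fallthrough */ comment becomes the kernel's fallthrough pseudo-keyword, which compilers actually verify under -Wimplicit-fallthrough, unlike the comment form. A standalone sketch of the pattern, assuming the GCC/Clang attribute and hypothetical mode values standing in for the MSM_BO_* flags:

	#define fallthrough __attribute__((__fallthrough__))

	static int cache_flag_ok(int mode, int has_coherent)
	{
		switch (mode) {
		case 0:				/* WC */
		case 1:				/* CACHED */
			return 1;
		case 2:				/* CACHED_COHERENT */
			if (has_coherent)
				return 1;
			fallthrough;		/* unsupported: reject like default */
		default:
			return 0;
		}
	}

	int main(void) { return !cache_flag_ok(2, 1); }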
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index ef70140c5b09..873cbd38e6d3 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -706,9 +706,7 @@ static int nt35510_power_on(struct nt35510 *nt)
if (ret)
return ret;
- ret = nt35510_read_id(nt);
- if (ret)
- return ret;
+ nt35510_read_id(nt);
/* Set up stuff in manufacturer control, page 1 */
ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 19fd39d9a00c..37a1b6a6ad6d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -127,7 +127,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
struct qxl_bo *qbo;
struct qxl_device *qdev;
- if (!qxl_ttm_bo_is_qxl_bo(bo))
+ if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource)
return;
qbo = to_qxl_bo(bo);
qdev = to_qxl(qbo->tbo.base.dev);
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 03395386e8a7..f4b08a8705b3 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -181,6 +181,9 @@ int ttm_range_man_fini(struct ttm_device *bdev,
struct drm_mm *mm = &rman->mm;
int ret;
+ if (!man)
+ return 0;
+
ttm_resource_manager_set_used(man, false);
ret = ttm_resource_manager_evict_all(bdev, man);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6f5ea00973e0..45aeeca9b8f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -36,6 +36,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 5648664f71bc..f2d625415458 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -354,7 +354,6 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
- ttm_bo_unpin(batch->otable_bo);
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index afec40da9b58..9776b755d848 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -159,7 +159,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
- domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request);
+ domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b44cbb8e84eb..b566f7cb7797 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -949,7 +949,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
sector_t sector_off = mr_status.sig_err.sig_err_offset;
sector_div(sector_off, sector_size + 8);
- *sector = scsi_get_lba(iser_task->sc) + sector_off;
+ *sector = scsi_get_sector(iser_task->sc) + sector_off;
iser_err("PI error found type %d at sector %llx "
"expected %x vs actual %x\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8d5cf5eb5778..71eda91e810c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1280,7 +1280,7 @@ static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
{
struct srp_terminate_context *context = context_ptr;
struct srp_target_port *target = context->srp_target;
- u32 tag = blk_mq_unique_tag(scmnd->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
struct srp_request *req = scsi_cmd_priv(scmnd);
@@ -2152,6 +2152,7 @@ static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
+ struct request *rq = scsi_cmd_to_rq(scmnd);
struct srp_target_port *target = host_to_target(shost);
struct srp_rdma_ch *ch;
struct srp_request *req = scsi_cmd_priv(scmnd);
@@ -2166,8 +2167,8 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
if (unlikely(scmnd->result))
goto err;
- WARN_ON_ONCE(scmnd->request->tag < 0);
- tag = blk_mq_unique_tag(scmnd->request);
+ WARN_ON_ONCE(rq->tag < 0);
+ tag = blk_mq_unique_tag(rq);
ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
spin_lock_irqsave(&ch->lock, flags);
@@ -2791,7 +2792,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
if (!req)
return SUCCESS;
- tag = blk_mq_unique_tag(scmnd->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
ch_idx = blk_mq_unique_tag_to_hwq(tag);
if (WARN_ON_ONCE(ch_idx >= target->ch_count))
return SUCCESS;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index dd20b01771c4..235f9bdaeaf2 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -379,6 +379,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
switch (idx) {
case CMDQ_ERR_CERROR_ABT_IDX:
dev_err(smmu->dev, "retrying command fetch\n");
+ return;
case CMDQ_ERR_CERROR_NONE_IDX:
return;
case CMDQ_ERR_CERROR_ATC_INV_IDX:
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 25ed444ff94d..021cf8f65ffc 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -849,12 +849,10 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- goto err_sysfs_remove;
+ return ret;
}
- ret = bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
- if (ret)
- goto err_unregister_device;
+ bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
if (qcom_iommu->local_base) {
pm_runtime_get_sync(dev);
@@ -863,13 +861,6 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
}
return 0;
-
-err_unregister_device:
- iommu_device_unregister(&qcom_iommu->iommu);
-
-err_sysfs_remove:
- iommu_device_sysfs_remove(&qcom_iommu->iommu);
- return ret;
}
static int qcom_iommu_device_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a6a07d985709..dd22fc7d5176 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2429,10 +2429,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return 0;
}
-static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
{
- unsigned long flags;
+ struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
+ unsigned long flags;
u16 did_old;
if (!iommu)
@@ -2444,7 +2445,16 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
spin_unlock_irqrestore(&iommu->lock, flags);
return;
}
- did_old = context_domain_id(context);
+
+ if (sm_supported(iommu)) {
+ if (hw_pass_through && domain_type_is_si(info->domain))
+ did_old = FLPT_DEFAULT_DID;
+ else
+ did_old = info->domain->iommu_did[iommu->seq_id];
+ } else {
+ did_old = context_domain_id(context);
+ }
+
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -2462,6 +2472,8 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
0,
0,
DMA_TLB_DSI_FLUSH);
+
+ __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}
static inline void unlink_domain_info(struct device_domain_info *info)
@@ -4425,9 +4437,9 @@ out_free_dmar:
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
- struct intel_iommu *iommu = opaque;
+ struct device_domain_info *info = opaque;
- domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+ domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff);
return 0;
}
@@ -4437,12 +4449,13 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
* devices, unbinding the driver from any one of them will possibly leave
* the others unable to operate.
*/
-static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+static void domain_context_clear(struct device_domain_info *info)
{
- if (!iommu || !dev || !dev_is_pci(dev))
+ if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
return;
- pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+ pci_for_each_dma_alias(to_pci_dev(info->dev),
+ &domain_context_clear_one_cb, info);
}
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
@@ -4459,14 +4472,13 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
iommu = info->iommu;
domain = info->domain;
- if (info->dev) {
+ if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
PASID_RID2PASID, false);
iommu_disable_dev_iotlb(info);
- if (!dev_is_real_dma_subdevice(info->dev))
- domain_context_clear(iommu, info->dev);
+ domain_context_clear(info);
intel_pasid_free_table(info->dev);
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 94b9d8e5b9a4..9febfb7f3025 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -544,12 +544,14 @@ static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
}
#define DT_HI_MASK GENMASK_ULL(39, 32)
+#define DTE_BASE_HI_MASK GENMASK(11, 4)
#define DT_SHIFT 28
static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
{
- return (phys_addr_t)(addr & RK_DTE_PT_ADDRESS_MASK) |
- ((addr & DT_HI_MASK) << DT_SHIFT);
+ u64 addr64 = addr;
+ return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
+ ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
}
static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
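The v2 DTE fix above does two things: it masks only DTE bits [11:4] (which carry physical address bits [39:32]) instead of the overly wide DT_HI_MASK, and it widens the value to 64 bits before the 28-bit shift, since shifting a 32-bit integer left by 28 silently discards the bits of interest. A standalone sketch of the second hazard; the low-bits mask value is illustrative, as RK_DTE_PT_ADDRESS_MASK is not shown in this hunk:

	#include <stdint.h>
	#include <stdio.h>

	#define PT_ADDRESS_MASK	0xfffff000u	/* illustrative low-bits mask */
	#define BASE_HI_MASK	0x00000ff0u	/* bits [11:4] -> addr [39:32] */
	#define HI_SHIFT	28

	static uint64_t dte_to_phys(uint32_t dte)
	{
		uint64_t dte64 = dte;		/* widen first, then shift */
		return (dte64 & PT_ADDRESS_MASK) |
		       ((dte64 & BASE_HI_MASK) << HI_SHIFT);
	}

	int main(void)
	{
		/* bits [11:4] = 0xab land in physical bits [39:32]: 0xab12345000 */
		printf("%#llx\n", (unsigned long long)dte_to_phys(0x12345ab0u));
		return 0;
	}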
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 0db17bcc9c16..cb1a64a5c256 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -789,6 +789,8 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
break;
}
}
+ fallthrough;
+
case JZ4740_MMC_STATE_DONE:
break;
}
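The added fallthrough tells both the compiler and readers that the preceding state is meant to continue into the DONE case rather than having lost a break. The kernel macro expands to the compiler's fallthrough statement attribute where available; a self-contained sketch of the idiom, with invented state names:

#include <stdio.h>

#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough	__attribute__((__fallthrough__))
#else
#define fallthrough	do {} while (0)	/* older compilers: no-op */
#endif

enum state { STATE_TRANSFER, STATE_DONE };

static void step(enum state s)
{
	switch (s) {
	case STATE_TRANSFER:
		puts("transfer complete");
		fallthrough;	/* intentional: DONE handling also runs */
	case STATE_DONE:
		puts("done");
		break;
	}
}

int main(void)
{
	step(STATE_TRANSFER);	/* prints both lines */
	return 0;
}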
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 99b7986002f0..6a6a2a21d2ed 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -108,8 +108,8 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi
#if BITS_PER_LONG >= 64
case 8:
onecmd |= (onecmd << (chip_mode * 32));
-#endif
fallthrough;
+#endif
case 4:
onecmd |= (onecmd << (chip_mode * 16));
fallthrough;
@@ -164,8 +164,8 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map,
#if BITS_PER_LONG >= 64
case 8:
res |= (onestat >> (chip_mode * 32));
-#endif
fallthrough;
+#endif
case 4:
res |= (onestat >> (chip_mode * 16));
fallthrough;
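Both hunks above move the annotation inside the #if block for the same reason: on 32-bit builds the case 8 arm is compiled out, leaving fallthrough; at the top of the switch with no statement to fall through from, which newer compilers reject. A standalone sketch of the same doubling cascade under conditional compilation (the fallthrough definition assumes GCC 7+; the ULONG_MAX test stands in for BITS_PER_LONG):

#include <stdio.h>
#include <limits.h>

#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough	__attribute__((__fallthrough__))
#else
#define fallthrough	do {} while (0)
#endif

/* Replicate a one-byte command into every byte lane, as
 * cfi_build_cmd() does. The 64-bit arm, annotation included, only
 * exists when long is 64 bits wide, mirroring the fix above.
 */
static unsigned long widen(unsigned long cmd, int lanes)
{
	switch (lanes) {
#if ULONG_MAX > 0xffffffffUL
	case 8:
		cmd |= cmd << 32;
		fallthrough;
#endif
	case 4:
		cmd |= cmd << 16;
		fallthrough;
	case 2:
		cmd |= cmd << 8;
		fallthrough;
	case 1:
		break;
	}
	return cmd;
}

int main(void)
{
	printf("%lx\n", widen(0xabUL, (int)sizeof(long)));	/* abab... */
	return 0;
}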
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0ff7567bd04f..d22d78303311 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -401,24 +401,85 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
+ int err;
if (!bond_dev)
return -EINVAL;
+ rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
- xs->xso.real_dev = slave->dev;
- bond->xs = xs;
+ if (!slave) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
- if (!(slave->dev->xfrmdev_ops
- && slave->dev->xfrmdev_ops->xdo_dev_state_add)) {
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
+ rcu_read_unlock();
return -EINVAL;
}
- return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
+ if (!ipsec) {
+ rcu_read_unlock();
+ return -ENOMEM;
+ }
+ xs->xso.real_dev = slave->dev;
+
+ err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ if (!err) {
+ ipsec->xs = xs;
+ INIT_LIST_HEAD(&ipsec->list);
+ spin_lock_bh(&bond->ipsec_lock);
+ list_add(&ipsec->list, &bond->ipsec_list);
+ spin_unlock_bh(&bond->ipsec_lock);
+ } else {
+ kfree(ipsec);
+ }
+ rcu_read_unlock();
+ return err;
+}
+
+static void bond_ipsec_add_sa_all(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct bond_ipsec *ipsec;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave)
+ goto out;
+
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(slave->dev)) {
+ spin_lock_bh(&bond->ipsec_lock);
+ if (!list_empty(&bond->ipsec_list))
+ slave_warn(bond_dev, slave->dev,
+ "%s: no slave xdo_dev_state_add\n",
+ __func__);
+ spin_unlock_bh(&bond->ipsec_lock);
+ goto out;
+ }
+
+ spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ ipsec->xs->xso.real_dev = slave->dev;
+ if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
+ slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
+ ipsec->xs->xso.real_dev = NULL;
+ }
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+out:
+ rcu_read_unlock();
}
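The rework replaces the single bond->xs pointer with a list so that every offloaded SA, not just the most recent one, can be torn down and replayed when the active slave changes. A hedged sketch of the bookkeeping, assuming a bond_ipsec layout consistent with how the hunks use it:

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <net/xfrm.h>

/* One tracking node per offloaded SA (assumed layout). */
struct bond_ipsec {
	struct list_head list;	/* links into bond->ipsec_list */
	struct xfrm_state *xs;	/* the offloaded state itself */
};

/* Re-point every tracked SA at the new active slave on failover. */
static void repoint_all_sas(struct list_head *ipsec_list, spinlock_t *lock,
			    struct net_device *new_real_dev)
{
	struct bond_ipsec *ipsec;

	spin_lock_bh(lock);	/* serializes against add_sa/del_sa */
	list_for_each_entry(ipsec, ipsec_list, list)
		ipsec->xs->xso.real_dev = new_real_dev;
	spin_unlock_bh(lock);
}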
/**
@@ -428,27 +489,77 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
if (!bond_dev)
return;
+ rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
if (!slave)
- return;
+ goto out;
- xs->xso.real_dev = slave->dev;
+ if (!xs->xso.real_dev)
+ goto out;
+
+ WARN_ON(xs->xso.real_dev != slave->dev);
- if (!(slave->dev->xfrmdev_ops
- && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) {
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
- return;
+ goto out;
}
slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+out:
+ spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (ipsec->xs == xs) {
+ list_del(&ipsec->list);
+ kfree(ipsec);
+ break;
+ }
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+ rcu_read_unlock();
+}
+
+static void bond_ipsec_del_sa_all(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct bond_ipsec *ipsec;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave) {
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (!ipsec->xs->xso.real_dev)
+ continue;
+
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(slave->dev)) {
+ slave_warn(bond_dev, slave->dev,
+ "%s: no slave xdo_dev_state_delete\n",
+ __func__);
+ } else {
+ slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+ }
+ ipsec->xs->xso.real_dev = NULL;
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+ rcu_read_unlock();
}
/**
@@ -459,21 +570,37 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *curr_active = rcu_dereference(bond->curr_active_slave);
- struct net_device *slave_dev = curr_active->dev;
+ struct net_device *real_dev;
+ struct slave *curr_active;
+ struct bonding *bond;
+ int err;
- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
- return true;
+ bond = netdev_priv(bond_dev);
+ rcu_read_lock();
+ curr_active = rcu_dereference(bond->curr_active_slave);
+ real_dev = curr_active->dev;
- if (!(slave_dev->xfrmdev_ops
- && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) {
- slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__);
- return false;
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+ err = false;
+ goto out;
}
- xs->xso.real_dev = slave_dev;
- return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+ if (!xs->xso.real_dev) {
+ err = false;
+ goto out;
+ }
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
+ netif_is_bond_master(real_dev)) {
+ err = false;
+ goto out;
+ }
+
+ err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+out:
+ rcu_read_unlock();
+ return err;
}
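bond->curr_active_slave is RCU-managed, so the reworked offload_ok path samples it and uses the result entirely inside one rcu_read_lock() section instead of dereferencing it with no protection. A minimal sketch of the pattern, with an illustrative function name:

#include <net/bonding.h>

static bool active_slave_can_offload(struct bonding *bond)
{
	struct slave *slave;
	bool ok = false;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (slave && slave->dev->xfrmdev_ops)	/* CONFIG_XFRM_OFFLOAD */
		ok = true;	/* slave cannot be freed until unlock */
	rcu_read_unlock();

	return ok;	/* never cache slave->dev past this point */
}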
static const struct xfrmdev_ops bond_xfrmdev_ops = {
@@ -990,8 +1117,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
return;
#ifdef CONFIG_XFRM_OFFLOAD
- if (old_active && bond->xs)
- bond_ipsec_del_sa(bond->xs);
+ bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
if (new_active) {
@@ -1066,10 +1192,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
#ifdef CONFIG_XFRM_OFFLOAD
- if (new_active && bond->xs) {
- xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true);
- bond_ipsec_add_sa(bond->xs);
- }
+ bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
/* resend IGMP joins since active slave has changed or
@@ -3327,6 +3450,7 @@ static int bond_master_netdev_event(unsigned long event,
return bond_event_changename(event_bond);
case NETDEV_UNREGISTER:
bond_remove_proc_entry(event_bond);
+ xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
break;
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
@@ -4894,7 +5018,8 @@ void bond_setup(struct net_device *bond_dev)
#ifdef CONFIG_XFRM_OFFLOAD
/* set up xfrm device ops (only supported in active-backup right now) */
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
- bond->xs = NULL;
+ INIT_LIST_HEAD(&bond->ipsec_list);
+ spin_lock_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index a77124bc1f4b..709660cb38f8 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -20,15 +20,6 @@ config CAIF_TTY
identified as N_CAIF. When this ldisc is opened from user space
it will redirect the TTY's traffic into the CAIF stack.
-config CAIF_HSI
- tristate "CAIF HSI transport driver"
- depends on CAIF
- default n
- help
- The CAIF low level driver for CAIF over HSI.
- Be aware that if you enable this then you also need to
- enable a low-level HSI driver.
-
config CAIF_VIRTIO
tristate "CAIF virtio transport driver"
depends on CAIF && HAS_DMA
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index b1918c8c126c..97f664f8016c 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -4,8 +4,5 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o
-# HSI interface
-obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
-
# Virtio interface
obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
deleted file mode 100644
index 3d63b15bbaa1..000000000000
--- a/drivers/net/caif/caif_hsi.c
+++ /dev/null
@@ -1,1454 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson
- * Dmitry.Tarnyagin / dmitry.tarnyagin@lockless.no
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/if_arp.h>
-#include <linux/timer.h>
-#include <net/rtnetlink.h>
-#include <linux/pkt_sched.h>
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_hsi.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Daniel Martensson");
-MODULE_DESCRIPTION("CAIF HSI driver");
-
-/* Returns the number of padding bytes for alignment. */
-#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
- (((pow)-((x)&((pow)-1)))))
-
-static const struct cfhsi_config hsi_default_config = {
-
- /* Inactivity timeout on HSI, ms */
- .inactivity_timeout = HZ,
-
-	/* Aggregation timeout (ms) of zero means no aggregation is done */
- .aggregation_timeout = 1,
-
- /*
- * HSI link layer flow-control thresholds.
- * Threshold values for the HSI packet queue. Flow-control will be
- * asserted when the number of packets exceeds q_high_mark. It will
- * not be de-asserted before the number of packets drops below
- * q_low_mark.
- * Warning: A high threshold value might increase throughput but it
- * will at the same time prevent channel prioritization and increase
- * the risk of flooding the modem. The high threshold should be above
- * the low.
- */
- .q_high_mark = 100,
- .q_low_mark = 50,
-
- /*
- * HSI padding options.
-	 * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
- */
- .head_align = 4,
- .tail_align = 4,
-};
-
-#define ON 1
-#define OFF 0
-
-static LIST_HEAD(cfhsi_list);
-
-static void cfhsi_inactivity_tout(struct timer_list *t)
-{
- struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- /* Schedule power down work queue. */
- if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_down_work);
-}
-
-static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
- const struct sk_buff *skb,
- int direction)
-{
- struct caif_payload_info *info;
- int hpad, tpad, len;
-
- info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
- tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
- len = skb->len + hpad + tpad;
-
- if (direction > 0)
- cfhsi->aggregation_len += len;
- else if (direction < 0)
- cfhsi->aggregation_len -= len;
-}
-
-static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
-{
- int i;
-
- if (cfhsi->cfg.aggregation_timeout == 0)
- return true;
-
- for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
- if (cfhsi->qhead[i].qlen)
- return true;
- }
-
- /* TODO: Use aggregation_len instead */
- if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
- return true;
-
- return false;
-}
-
-static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
-{
- struct sk_buff *skb;
- int i;
-
- for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
- skb = skb_dequeue(&cfhsi->qhead[i]);
- if (skb)
- break;
- }
-
- return skb;
-}
-
-static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
-{
- int i, len = 0;
- for (i = 0; i < CFHSI_PRIO_LAST; ++i)
- len += skb_queue_len(&cfhsi->qhead[i]);
- return len;
-}
-
-static void cfhsi_abort_tx(struct cfhsi *cfhsi)
-{
- struct sk_buff *skb;
-
- for (;;) {
- spin_lock_bh(&cfhsi->lock);
- skb = cfhsi_dequeue(cfhsi);
- if (!skb)
- break;
-
- cfhsi->ndev->stats.tx_errors++;
- cfhsi->ndev->stats.tx_dropped++;
- cfhsi_update_aggregation_stats(cfhsi, skb, -1);
- spin_unlock_bh(&cfhsi->lock);
- kfree_skb(skb);
- }
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
-}
-
-static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
-{
- char buffer[32]; /* Any reasonable value */
- size_t fifo_occupancy;
- int ret;
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- do {
- ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
- &fifo_occupancy);
- if (ret) {
- netdev_warn(cfhsi->ndev,
- "%s: can't get FIFO occupancy: %d.\n",
- __func__, ret);
- break;
- } else if (!fifo_occupancy)
-			/* No more data, exiting normally */
- break;
-
- fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
- set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
- cfhsi->ops);
- if (ret) {
- clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- netdev_warn(cfhsi->ndev,
- "%s: can't read data: %d.\n",
- __func__, ret);
- break;
- }
-
- ret = 5 * HZ;
- ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
- !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
-
- if (ret < 0) {
- netdev_warn(cfhsi->ndev,
- "%s: can't wait for flush complete: %d.\n",
- __func__, ret);
- break;
- } else if (!ret) {
- ret = -ETIMEDOUT;
- netdev_warn(cfhsi->ndev,
- "%s: timeout waiting for flush complete.\n",
- __func__);
- break;
- }
- } while (1);
-
- return ret;
-}
-
-static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
- int nfrms = 0;
- int pld_len = 0;
- struct sk_buff *skb;
- u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
-
- skb = cfhsi_dequeue(cfhsi);
- if (!skb)
- return 0;
-
- /* Clear offset. */
- desc->offset = 0;
-
- /* Check if we can embed a CAIF frame. */
- if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
- struct caif_payload_info *info;
- int hpad;
- int tpad;
-
- /* Calculate needed head alignment and tail alignment. */
- info = (struct caif_payload_info *)&skb->cb;
-
- hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
- tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
-
- /* Check if frame still fits with added alignment. */
- if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
- u8 *pemb = desc->emb_frm;
- desc->offset = CFHSI_DESC_SHORT_SZ;
- *pemb = (u8)(hpad - 1);
- pemb += hpad;
-
- /* Update network statistics. */
- spin_lock_bh(&cfhsi->lock);
- cfhsi->ndev->stats.tx_packets++;
- cfhsi->ndev->stats.tx_bytes += skb->len;
- cfhsi_update_aggregation_stats(cfhsi, skb, -1);
- spin_unlock_bh(&cfhsi->lock);
-
- /* Copy in embedded CAIF frame. */
- skb_copy_bits(skb, 0, pemb, skb->len);
-
- /* Consume the SKB */
- consume_skb(skb);
- skb = NULL;
- }
- }
-
- /* Create payload CAIF frames. */
- while (nfrms < CFHSI_MAX_PKTS) {
- struct caif_payload_info *info;
- int hpad;
- int tpad;
-
- if (!skb)
- skb = cfhsi_dequeue(cfhsi);
-
- if (!skb)
- break;
-
- /* Calculate needed head alignment and tail alignment. */
- info = (struct caif_payload_info *)&skb->cb;
-
- hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
- tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
-
- /* Fill in CAIF frame length in descriptor. */
- desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
-
- /* Fill head padding information. */
- *pfrm = (u8)(hpad - 1);
- pfrm += hpad;
-
- /* Update network statistics. */
- spin_lock_bh(&cfhsi->lock);
- cfhsi->ndev->stats.tx_packets++;
- cfhsi->ndev->stats.tx_bytes += skb->len;
- cfhsi_update_aggregation_stats(cfhsi, skb, -1);
- spin_unlock_bh(&cfhsi->lock);
-
- /* Copy in CAIF frame. */
- skb_copy_bits(skb, 0, pfrm, skb->len);
-
- /* Update payload length. */
- pld_len += desc->cffrm_len[nfrms];
-
- /* Update frame pointer. */
- pfrm += skb->len + tpad;
-
- /* Consume the SKB */
- consume_skb(skb);
- skb = NULL;
-
- /* Update number of frames. */
- nfrms++;
- }
-
- /* Unused length fields should be zero-filled (according to SPEC). */
- while (nfrms < CFHSI_MAX_PKTS) {
- desc->cffrm_len[nfrms] = 0x0000;
- nfrms++;
- }
-
- /* Check if we can piggy-back another descriptor. */
- if (cfhsi_can_send_aggregate(cfhsi))
- desc->header |= CFHSI_PIGGY_DESC;
- else
- desc->header &= ~CFHSI_PIGGY_DESC;
-
- return CFHSI_DESC_SZ + pld_len;
-}
-
-static void cfhsi_start_tx(struct cfhsi *cfhsi)
-{
- struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
- int len, res;
-
- netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- do {
- /* Create HSI frame. */
- len = cfhsi_tx_frm(desc, cfhsi);
- if (!len) {
- spin_lock_bh(&cfhsi->lock);
- if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
- spin_unlock_bh(&cfhsi->lock);
- res = -EAGAIN;
- continue;
- }
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- /* Start inactivity timer. */
- mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
- break;
- }
-
- /* Set up new transfer. */
- res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
- if (WARN_ON(res < 0))
- netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
- __func__, res);
- } while (res < 0);
-}
-
-static void cfhsi_tx_done(struct cfhsi *cfhsi)
-{
- netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /*
- * Send flow on if flow off has been previously signalled
- * and number of packets is below low water mark.
- */
- spin_lock_bh(&cfhsi->lock);
- if (cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
- cfhsi->cfdev.flowctrl) {
-
- cfhsi->flow_off_sent = 0;
- cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
- }
-
- if (cfhsi_can_send_aggregate(cfhsi)) {
- spin_unlock_bh(&cfhsi->lock);
- cfhsi_start_tx(cfhsi);
- } else {
- mod_timer(&cfhsi->aggregation_timer,
- jiffies + cfhsi->cfg.aggregation_timeout);
- spin_unlock_bh(&cfhsi->lock);
- }
-
- return;
-}
-
-static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
- cfhsi_tx_done(cfhsi);
-}
-
-static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
- int xfer_sz = 0;
- int nfrms = 0;
- u16 *plen = NULL;
- u8 *pfrm = NULL;
-
- if ((desc->header & ~CFHSI_PIGGY_DESC) ||
- (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
- netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Check for embedded CAIF frame. */
- if (desc->offset) {
- struct sk_buff *skb;
- int len = 0;
- pfrm = ((u8 *)desc) + desc->offset;
-
- /* Remove offset padding. */
- pfrm += *pfrm + 1;
-
- /* Read length of CAIF frame (little endian). */
- len = *pfrm;
- len |= ((*(pfrm+1)) << 8) & 0xFF00;
- len += 2; /* Add FCS fields. */
-
- /* Sanity check length of CAIF frame. */
- if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_ATOMIC);
- if (!skb) {
- netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
- __func__);
- return -ENOMEM;
- }
- caif_assert(skb != NULL);
-
- skb_put_data(skb, pfrm, len);
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
- skb->dev = cfhsi->ndev;
-
- netif_rx_any_context(skb);
-
- /* Update network statistics. */
- cfhsi->ndev->stats.rx_packets++;
- cfhsi->ndev->stats.rx_bytes += len;
- }
-
- /* Calculate transfer length. */
- plen = desc->cffrm_len;
- while (nfrms < CFHSI_MAX_PKTS && *plen) {
- xfer_sz += *plen;
- plen++;
- nfrms++;
- }
-
- /* Check for piggy-backed descriptor. */
- if (desc->header & CFHSI_PIGGY_DESC)
- xfer_sz += CFHSI_DESC_SZ;
-
- if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
- netdev_err(cfhsi->ndev,
- "%s: Invalid payload len: %d, ignored.\n",
- __func__, xfer_sz);
- return -EPROTO;
- }
- return xfer_sz;
-}
-
-static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
-{
- int xfer_sz = 0;
- int nfrms = 0;
- u16 *plen;
-
- if ((desc->header & ~CFHSI_PIGGY_DESC) ||
- (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
-
- pr_err("Invalid descriptor. %x %x\n", desc->header,
- desc->offset);
- return -EPROTO;
- }
-
- /* Calculate transfer length. */
- plen = desc->cffrm_len;
- while (nfrms < CFHSI_MAX_PKTS && *plen) {
- xfer_sz += *plen;
- plen++;
- nfrms++;
- }
-
- if (xfer_sz % 4) {
- pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
- return -EPROTO;
- }
- return xfer_sz;
-}
-
-static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
- int rx_sz = 0;
- int nfrms = 0;
- u16 *plen = NULL;
- u8 *pfrm = NULL;
-
- /* Sanity check header and offset. */
- if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
- (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
- netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Set frame pointer to start of payload. */
- pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
- plen = desc->cffrm_len;
-
- /* Skip already processed frames. */
- while (nfrms < cfhsi->rx_state.nfrms) {
- pfrm += *plen;
- rx_sz += *plen;
- plen++;
- nfrms++;
- }
-
- /* Parse payload. */
- while (nfrms < CFHSI_MAX_PKTS && *plen) {
- struct sk_buff *skb;
- u8 *pcffrm = NULL;
- int len;
-
- /* CAIF frame starts after head padding. */
- pcffrm = pfrm + *pfrm + 1;
-
- /* Read length of CAIF frame (little endian). */
- len = *pcffrm;
- len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
- len += 2; /* Add FCS fields. */
-
- /* Sanity check length of CAIF frames. */
- if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_ATOMIC);
- if (!skb) {
- netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
- __func__);
- cfhsi->rx_state.nfrms = nfrms;
- return -ENOMEM;
- }
- caif_assert(skb != NULL);
-
- skb_put_data(skb, pcffrm, len);
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
- skb->dev = cfhsi->ndev;
-
- netif_rx_any_context(skb);
-
- /* Update network statistics. */
- cfhsi->ndev->stats.rx_packets++;
- cfhsi->ndev->stats.rx_bytes += len;
-
- pfrm += *plen;
- rx_sz += *plen;
- plen++;
- nfrms++;
- }
-
- return rx_sz;
-}
-
-static void cfhsi_rx_done(struct cfhsi *cfhsi)
-{
- int res;
- int desc_pld_len = 0, rx_len, rx_state;
- struct cfhsi_desc *desc = NULL;
- u8 *rx_ptr, *rx_buf;
- struct cfhsi_desc *piggy_desc = NULL;
-
- desc = (struct cfhsi_desc *)cfhsi->rx_buf;
-
- netdev_dbg(cfhsi->ndev, "%s\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /* Update inactivity timer if pending. */
- spin_lock_bh(&cfhsi->lock);
- mod_timer_pending(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
-
- if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
- desc_pld_len = cfhsi_rx_desc_len(desc);
-
- if (desc_pld_len < 0)
- goto out_of_sync;
-
- rx_buf = cfhsi->rx_buf;
- rx_len = desc_pld_len;
- if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
- rx_len += CFHSI_DESC_SZ;
- if (desc_pld_len == 0)
- rx_buf = cfhsi->rx_flip_buf;
- } else {
- rx_buf = cfhsi->rx_flip_buf;
-
- rx_len = CFHSI_DESC_SZ;
- if (cfhsi->rx_state.pld_len > 0 &&
- (desc->header & CFHSI_PIGGY_DESC)) {
-
- piggy_desc = (struct cfhsi_desc *)
- (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
- cfhsi->rx_state.pld_len);
-
- cfhsi->rx_state.piggy_desc = true;
-
- /* Extract payload len from piggy-backed descriptor. */
- desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
- if (desc_pld_len < 0)
- goto out_of_sync;
-
- if (desc_pld_len > 0) {
- rx_len = desc_pld_len;
- if (piggy_desc->header & CFHSI_PIGGY_DESC)
- rx_len += CFHSI_DESC_SZ;
- }
-
- /*
- * Copy needed information from the piggy-backed
-		 * descriptor to the descriptor at the start.
- */
- memcpy(rx_buf, (u8 *)piggy_desc,
- CFHSI_DESC_SHORT_SZ);
- }
- }
-
- if (desc_pld_len) {
- rx_state = CFHSI_RX_STATE_PAYLOAD;
- rx_ptr = rx_buf + CFHSI_DESC_SZ;
- } else {
- rx_state = CFHSI_RX_STATE_DESC;
- rx_ptr = rx_buf;
- rx_len = CFHSI_DESC_SZ;
- }
-
- /* Initiate next read */
- if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
- /* Set up new transfer. */
- netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
- __func__);
-
- res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
- cfhsi->ops);
- if (WARN_ON(res < 0)) {
- netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
- __func__, res);
- cfhsi->ndev->stats.rx_errors++;
- cfhsi->ndev->stats.rx_dropped++;
- }
- }
-
- if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
- /* Extract payload from descriptor */
- if (cfhsi_rx_desc(desc, cfhsi) < 0)
- goto out_of_sync;
- } else {
- /* Extract payload */
- if (cfhsi_rx_pld(desc, cfhsi) < 0)
- goto out_of_sync;
- if (piggy_desc) {
- /* Extract any payload in piggyback descriptor. */
- if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
- goto out_of_sync;
- /* Mark no embedded frame after extracting it */
- piggy_desc->offset = 0;
- }
- }
-
- /* Update state info */
- memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
- cfhsi->rx_state.state = rx_state;
- cfhsi->rx_ptr = rx_ptr;
- cfhsi->rx_len = rx_len;
- cfhsi->rx_state.pld_len = desc_pld_len;
- cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
-
- if (rx_buf != cfhsi->rx_buf)
- swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
- return;
-
-out_of_sync:
- netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
- print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
- cfhsi->rx_buf, CFHSI_DESC_SZ);
- schedule_work(&cfhsi->out_of_sync_work);
-}
-
-static void cfhsi_rx_slowpath(struct timer_list *t)
-{
- struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- cfhsi_rx_done(cfhsi);
-}
-
-static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
- wake_up_interruptible(&cfhsi->flush_fifo_wait);
- else
- cfhsi_rx_done(cfhsi);
-}
-
-static void cfhsi_wake_up(struct work_struct *work)
-{
- struct cfhsi *cfhsi = NULL;
- int res;
- int len;
- long ret;
-
- cfhsi = container_of(work, struct cfhsi, wake_up_work);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
-		/* It happens when wakeup is requested by
- * both ends at the same time. */
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
- return;
- }
-
- /* Activate wake line. */
- cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
-
- netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
- __func__);
-
- /* Wait for acknowledge. */
- ret = CFHSI_WAKE_TOUT;
- ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
- test_and_clear_bit(CFHSI_WAKE_UP_ACK,
- &cfhsi->bits), ret);
- if (unlikely(ret < 0)) {
- /* Interrupted by signal. */
- netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
- __func__, ret);
-
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
- return;
- } else if (!ret) {
- bool ca_wake = false;
- size_t fifo_occupancy = 0;
-
- /* Wakeup timeout */
- netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
- __func__);
-
-		/* Check the FIFO to see if the modem has sent something. */
- WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
- &fifo_occupancy));
-
- netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
- __func__, (unsigned) fifo_occupancy);
-
-		/* Check if we missed the interrupt. */
- WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
- &ca_wake));
-
- if (ca_wake) {
- netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
- __func__);
-
- /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-
- /* Continue execution. */
- goto wake_ack;
- }
-
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
- return;
- }
-wake_ack:
- netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
- __func__);
-
- /* Clear power up bit. */
- set_bit(CFHSI_AWAKE, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
-
- /* Resume read operation. */
- netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
- res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
-
- if (WARN_ON(res < 0))
- netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
-
-	/* Clear power up acknowledgment. */
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-
- spin_lock_bh(&cfhsi->lock);
-
- /* Resume transmit if queues are not empty. */
- if (!cfhsi_tx_queue_len(cfhsi)) {
- netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
- __func__);
- /* Start inactivity timer. */
- mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
- return;
- }
-
- netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
- __func__);
-
- spin_unlock_bh(&cfhsi->lock);
-
- /* Create HSI frame. */
- len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
-
- if (likely(len > 0)) {
- /* Set up new transfer. */
- res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
- if (WARN_ON(res < 0)) {
- netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
- __func__, res);
- cfhsi_abort_tx(cfhsi);
- }
- } else {
- netdev_err(cfhsi->ndev,
- "%s: Failed to create HSI frame: %d.\n",
- __func__, len);
- }
-}
-
-static void cfhsi_wake_down(struct work_struct *work)
-{
- long ret;
- struct cfhsi *cfhsi = NULL;
- size_t fifo_occupancy = 0;
- int retry = CFHSI_WAKE_TOUT;
-
- cfhsi = container_of(work, struct cfhsi, wake_down_work);
- netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /* Deactivate wake line. */
- cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
-
- /* Wait for acknowledge. */
- ret = CFHSI_WAKE_TOUT;
- ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
- test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
- &cfhsi->bits), ret);
- if (ret < 0) {
- /* Interrupted by signal. */
- netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
- __func__, ret);
- return;
- } else if (!ret) {
- bool ca_wake = true;
-
- /* Timeout */
- netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
-
-		/* Check if we missed the interrupt. */
- WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
- &ca_wake));
- if (!ca_wake)
- netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
- __func__);
- }
-
- /* Check FIFO occupancy. */
- while (retry) {
- WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
- &fifo_occupancy));
-
- if (!fifo_occupancy)
- break;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- retry--;
- }
-
- if (!retry)
- netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
-
- /* Clear AWAKE condition. */
- clear_bit(CFHSI_AWAKE, &cfhsi->bits);
-
- /* Cancel pending RX requests. */
- cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
-}
-
-static void cfhsi_out_of_sync(struct work_struct *work)
-{
- struct cfhsi *cfhsi = NULL;
-
- cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
-
- rtnl_lock();
- dev_close(cfhsi->ndev);
- rtnl_unlock();
-}
-
-static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi = NULL;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
- wake_up_interruptible(&cfhsi->wake_up_wait);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /* Schedule wake up work queue if the peer initiates. */
- if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_up_work);
-}
-
-static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi = NULL;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- /* Initiating low power is only permitted by the host (us). */
- set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
- wake_up_interruptible(&cfhsi->wake_down_wait);
-}
-
-static void cfhsi_aggregation_tout(struct timer_list *t)
-{
- struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- cfhsi_start_tx(cfhsi);
-}
-
-static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct cfhsi *cfhsi = NULL;
- int start_xfer = 0;
- int timer_active;
- int prio;
-
- if (!dev)
- return -EINVAL;
-
- cfhsi = netdev_priv(dev);
-
- switch (skb->priority) {
- case TC_PRIO_BESTEFFORT:
- case TC_PRIO_FILLER:
- case TC_PRIO_BULK:
- prio = CFHSI_PRIO_BEBK;
- break;
- case TC_PRIO_INTERACTIVE_BULK:
- prio = CFHSI_PRIO_VI;
- break;
- case TC_PRIO_INTERACTIVE:
- prio = CFHSI_PRIO_VO;
- break;
- case TC_PRIO_CONTROL:
- default:
- prio = CFHSI_PRIO_CTL;
- break;
- }
-
- spin_lock_bh(&cfhsi->lock);
-
- /* Update aggregation statistics */
- cfhsi_update_aggregation_stats(cfhsi, skb, 1);
-
- /* Queue the SKB */
- skb_queue_tail(&cfhsi->qhead[prio], skb);
-
- /* Sanity check; xmit should not be called after unregister_netdev */
- if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
- spin_unlock_bh(&cfhsi->lock);
- cfhsi_abort_tx(cfhsi);
- return -EINVAL;
- }
-
- /* Send flow off if number of packets is above high water mark. */
- if (!cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
- cfhsi->cfdev.flowctrl) {
- cfhsi->flow_off_sent = 1;
- cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
- }
-
- if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
- cfhsi->tx_state = CFHSI_TX_STATE_XFER;
- start_xfer = 1;
- }
-
- if (!start_xfer) {
- /* Send aggregate if it is possible */
- bool aggregate_ready =
- cfhsi_can_send_aggregate(cfhsi) &&
- del_timer(&cfhsi->aggregation_timer) > 0;
- spin_unlock_bh(&cfhsi->lock);
- if (aggregate_ready)
- cfhsi_start_tx(cfhsi);
- return NETDEV_TX_OK;
- }
-
- /* Delete inactivity timer if started. */
- timer_active = del_timer_sync(&cfhsi->inactivity_timer);
-
- spin_unlock_bh(&cfhsi->lock);
-
- if (timer_active) {
- struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
- int len;
- int res;
-
- /* Create HSI frame. */
- len = cfhsi_tx_frm(desc, cfhsi);
- WARN_ON(!len);
-
- /* Set up new transfer. */
- res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
- if (WARN_ON(res < 0)) {
- netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
- __func__, res);
- cfhsi_abort_tx(cfhsi);
- }
- } else {
-		/* Schedule wake up work queue if we initiate. */
- if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_up_work);
- }
-
- return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops cfhsi_netdevops;
-
-static void cfhsi_setup(struct net_device *dev)
-{
- int i;
- struct cfhsi *cfhsi = netdev_priv(dev);
- dev->features = 0;
- dev->type = ARPHRD_CAIF;
- dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
- dev->priv_flags |= IFF_NO_QUEUE;
- dev->needs_free_netdev = true;
- dev->netdev_ops = &cfhsi_netdevops;
- for (i = 0; i < CFHSI_PRIO_LAST; ++i)
- skb_queue_head_init(&cfhsi->qhead[i]);
- cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
- cfhsi->cfdev.use_frag = false;
- cfhsi->cfdev.use_stx = false;
- cfhsi->cfdev.use_fcs = false;
- cfhsi->ndev = dev;
- cfhsi->cfg = hsi_default_config;
-}
-
-static int cfhsi_open(struct net_device *ndev)
-{
- struct cfhsi *cfhsi = netdev_priv(ndev);
- int res;
-
- clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
-
-	/* Initialize state variables. */
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
-
- /* Set flow info */
- cfhsi->flow_off_sent = 0;
-
- /*
-	 * Allocate a TX buffer with the size of an HSI packet descriptor
- * and the necessary room for CAIF payload frames.
- */
- cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
- if (!cfhsi->tx_buf) {
- res = -ENODEV;
- goto err_alloc_tx;
- }
-
- /*
- * Allocate a RX buffer with the size of two HSI packet descriptors and
- * the necessary room for CAIF payload frames.
- */
- cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
- if (!cfhsi->rx_buf) {
- res = -ENODEV;
- goto err_alloc_rx;
- }
-
- cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
- if (!cfhsi->rx_flip_buf) {
- res = -ENODEV;
- goto err_alloc_rx_flip;
- }
-
- /* Initialize aggregation timeout */
- cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
-
-	/* Initialize receive variables. */
- cfhsi->rx_ptr = cfhsi->rx_buf;
- cfhsi->rx_len = CFHSI_DESC_SZ;
-
- /* Initialize spin locks. */
- spin_lock_init(&cfhsi->lock);
-
- /* Set up the driver. */
- cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
- cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
- cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
- cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
-
- /* Initialize the work queues. */
- INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
- INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
- INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
-
- /* Clear all bit fields. */
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- clear_bit(CFHSI_AWAKE, &cfhsi->bits);
-
- /* Create work thread. */
- cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
- if (!cfhsi->wq) {
- netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
- __func__);
- res = -ENODEV;
- goto err_create_wq;
- }
-
- /* Initialize wait queues. */
- init_waitqueue_head(&cfhsi->wake_up_wait);
- init_waitqueue_head(&cfhsi->wake_down_wait);
- init_waitqueue_head(&cfhsi->flush_fifo_wait);
-
- /* Setup the inactivity timer. */
- timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
- /* Setup the slowpath RX timer. */
- timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
- /* Setup the aggregation timer. */
- timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
-
- /* Activate HSI interface. */
- res = cfhsi->ops->cfhsi_up(cfhsi->ops);
- if (res) {
- netdev_err(cfhsi->ndev,
- "%s: can't activate HSI interface: %d.\n",
- __func__, res);
- goto err_activate;
- }
-
- /* Flush FIFO */
- res = cfhsi_flush_fifo(cfhsi);
- if (res) {
- netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
- __func__, res);
- goto err_net_reg;
- }
- return res;
-
- err_net_reg:
- cfhsi->ops->cfhsi_down(cfhsi->ops);
- err_activate:
- destroy_workqueue(cfhsi->wq);
- err_create_wq:
- kfree(cfhsi->rx_flip_buf);
- err_alloc_rx_flip:
- kfree(cfhsi->rx_buf);
- err_alloc_rx:
- kfree(cfhsi->tx_buf);
- err_alloc_tx:
- return res;
-}
-
-static int cfhsi_close(struct net_device *ndev)
-{
- struct cfhsi *cfhsi = netdev_priv(ndev);
- u8 *tx_buf, *rx_buf, *flip_buf;
-
-	/* going to shut down the driver */
- set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
-
- /* Delete timers if pending */
- del_timer_sync(&cfhsi->inactivity_timer);
- del_timer_sync(&cfhsi->rx_slowpath_timer);
- del_timer_sync(&cfhsi->aggregation_timer);
-
- /* Cancel pending RX request (if any) */
- cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
-
- /* Destroy workqueue */
- destroy_workqueue(cfhsi->wq);
-
-	/* Store buffers: will be freed later. */
- tx_buf = cfhsi->tx_buf;
- rx_buf = cfhsi->rx_buf;
- flip_buf = cfhsi->rx_flip_buf;
- /* Flush transmit queues. */
- cfhsi_abort_tx(cfhsi);
-
- /* Deactivate interface */
- cfhsi->ops->cfhsi_down(cfhsi->ops);
-
- /* Free buffers. */
- kfree(tx_buf);
- kfree(rx_buf);
- kfree(flip_buf);
- return 0;
-}
-
-static void cfhsi_uninit(struct net_device *dev)
-{
- struct cfhsi *cfhsi = netdev_priv(dev);
- ASSERT_RTNL();
- symbol_put(cfhsi_get_device);
- list_del(&cfhsi->list);
-}
-
-static const struct net_device_ops cfhsi_netdevops = {
- .ndo_uninit = cfhsi_uninit,
- .ndo_open = cfhsi_open,
- .ndo_stop = cfhsi_close,
- .ndo_start_xmit = cfhsi_xmit
-};
-
-static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
-{
- int i;
-
- if (!data) {
- pr_debug("no params data found\n");
- return;
- }
-
- i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
- /*
- * Inactivity timeout in millisecs. Lowest possible value is 1,
- * and highest possible is NEXT_TIMER_MAX_DELTA.
- */
- if (data[i]) {
- u32 inactivity_timeout = nla_get_u32(data[i]);
- /* Pre-calculate inactivity timeout. */
- cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
- if (cfhsi->cfg.inactivity_timeout == 0)
- cfhsi->cfg.inactivity_timeout = 1;
- else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
- cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
- }
-
- i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
- if (data[i])
- cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_HEAD_ALIGN;
- if (data[i])
- cfhsi->cfg.head_align = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_TAIL_ALIGN;
- if (data[i])
- cfhsi->cfg.tail_align = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
- if (data[i])
- cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
- if (data[i])
- cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
-}
-
-static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- cfhsi_netlink_parms(data, netdev_priv(dev));
- netdev_state_change(dev);
- return 0;
-}
-
-static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
- [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
-};
-
-static size_t caif_hsi_get_size(const struct net_device *dev)
-{
- int i;
- size_t s = 0;
- for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
- s += nla_total_size(caif_hsi_policy[i].len);
- return s;
-}
-
-static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
-{
- struct cfhsi *cfhsi = netdev_priv(dev);
-
- if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
- cfhsi->cfg.inactivity_timeout) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
- cfhsi->cfg.aggregation_timeout) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
- cfhsi->cfg.head_align) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
- cfhsi->cfg.tail_align) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
- cfhsi->cfg.q_high_mark) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
- cfhsi->cfg.q_low_mark))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- struct cfhsi *cfhsi = NULL;
- struct cfhsi_ops *(*get_ops)(void);
-
- ASSERT_RTNL();
-
- cfhsi = netdev_priv(dev);
- cfhsi_netlink_parms(data, cfhsi);
-
- get_ops = symbol_get(cfhsi_get_ops);
- if (!get_ops) {
- pr_err("%s: failed to get the cfhsi_ops\n", __func__);
- return -ENODEV;
- }
-
- /* Assign the HSI device. */
- cfhsi->ops = (*get_ops)();
- if (!cfhsi->ops) {
- pr_err("%s: failed to get the cfhsi_ops\n", __func__);
- goto err;
- }
-
- /* Assign the driver to this HSI device. */
- cfhsi->ops->cb_ops = &cfhsi->cb_ops;
- if (register_netdevice(dev)) {
- pr_warn("%s: caif_hsi device registration failed\n", __func__);
- goto err;
- }
- /* Add CAIF HSI device to list. */
- list_add_tail(&cfhsi->list, &cfhsi_list);
-
- return 0;
-err:
- symbol_put(cfhsi_get_ops);
- return -ENODEV;
-}
-
-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
- .kind = "cfhsi",
- .priv_size = sizeof(struct cfhsi),
- .setup = cfhsi_setup,
- .maxtype = __IFLA_CAIF_HSI_MAX,
- .policy = caif_hsi_policy,
- .newlink = caif_hsi_newlink,
- .changelink = caif_hsi_changelink,
- .get_size = caif_hsi_get_size,
- .fill_info = caif_hsi_fill_info,
-};
-
-static void __exit cfhsi_exit_module(void)
-{
- struct list_head *list_node;
- struct list_head *n;
- struct cfhsi *cfhsi;
-
- rtnl_link_unregister(&caif_hsi_link_ops);
-
- rtnl_lock();
- list_for_each_safe(list_node, n, &cfhsi_list) {
- cfhsi = list_entry(list_node, struct cfhsi, list);
- unregister_netdevice(cfhsi->ndev);
- }
- rtnl_unlock();
-}
-
-static int __init cfhsi_init_module(void)
-{
- return rtnl_link_register(&caif_hsi_link_ops);
-}
-
-module_init(cfhsi_init_module);
-module_exit(cfhsi_exit_module);
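One detail worth noting from the deleted driver: its PAD_POW2() macro computes how many bytes are needed to round x up to the next multiple of pow, relying on pow being a power of two so the bitmask trick works (hence the warning in the removed config comment). A standalone copy for illustration:

#include <stdio.h>

/* Round-up padding: bytes needed to align x to the next multiple of
 * pow, where pow must be a power of two for the mask trick to work.
 */
#define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : \
			  ((pow) - ((x) & ((pow) - 1))))

int main(void)
{
	printf("%d\n", PAD_POW2(13, 4));	/* 3: pads 13 up to 16 */
	printf("%d\n", PAD_POW2(16, 4));	/* 0: already aligned */
	return 0;
}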
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index a7e5ac60baef..1542bfb8b5e5 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -419,8 +419,10 @@ int ksz_switch_register(struct ksz_device *dev,
if (of_property_read_u32(port, "reg",
&port_num))
continue;
- if (!(dev->port_mask & BIT(port_num)))
+ if (!(dev->port_mask & BIT(port_num))) {
+ of_node_put(port);
return -EINVAL;
+ }
of_get_phy_mode(port,
&dev->ports[port_num].interface);
}
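The fix above addresses a device-node refcount leak: for_each_available_child_of_node() takes a reference on each child it yields and drops it when the loop advances, so any early return must drop the current child's reference itself. A sketch of the idiom with a hypothetical helper:

#include <linux/errno.h>
#include <linux/of.h>

static int count_valid_ports(struct device_node *parent, u32 max_ports)
{
	struct device_node *child;
	u32 reg;
	int n = 0;

	for_each_available_child_of_node(parent, child) {
		if (of_property_read_u32(child, "reg", &reg))
			continue;	/* the loop macro drops the ref */
		if (reg >= max_ports) {
			of_node_put(child);	/* early return: drop it here */
			return -EINVAL;
		}
		n++;
	}

	return n;
}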
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 961fa6b75cad..beb41572d04e 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3583,6 +3583,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3596,7 +3597,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
- .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@@ -3606,6 +3607,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3619,6 +3623,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6341_phylink_validate,
};
@@ -4383,6 +4392,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -4396,7 +4406,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
- .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@@ -4406,6 +4416,9 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -4421,6 +4434,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6341_phylink_validate,
};
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index e4fbef81bc52..b1d46dd8eaab 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -722,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6390_serdes_get_lane(chip, port) < 0)
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
@@ -734,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
struct mv88e6390_serdes_hw_stat *stat;
int i;
- if (mv88e6390_serdes_get_lane(chip, port) < 0)
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
@@ -770,7 +770,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
int lane;
int i;
- lane = mv88e6390_serdes_get_lane(chip, port);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
if (lane < 0)
return 0;
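Switching from mv88e6390_serdes_get_lane() to mv88e6xxx_serdes_get_lane() routes the lookup through the per-chip ops table, so families such as the 6141/6341 that reuse the 6390 stats helpers resolve their own lane mapping. The body below is an assumption about the dispatcher's shape, not a verbatim copy:

#include <linux/errno.h>

#include "chip.h"	/* driver-local: struct mv88e6xxx_chip and its ops */

static int serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
	if (!chip->info->ops->serdes_get_lane)
		return -EOPNOTSUPP;	/* family has no serdes lanes */

	return chip->info->ops->serdes_get_lane(chip, port);
}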
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 4f0545605f6b..ced8c9cb29c2 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -122,14 +122,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
for (i = 0; i < ds->num_ports; i++) {
mac[i] = default_mac;
- if (i == dsa_upstream_port(priv->ds, i)) {
- /* STP doesn't get called for CPU port, so we need to
- * set the I/O parameters statically.
- */
- mac[i].dyn_learn = true;
- mac[i].ingress = true;
- mac[i].egress = true;
- }
+
+ /* Let sja1105_bridge_stp_state_set() keep address learning
+ * enabled for the CPU port.
+ */
+ if (dsa_is_cpu_port(ds, i))
+ priv->learn_ena |= BIT(i);
}
return 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 7dff20350865..f19370c33444 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -594,6 +594,11 @@ int atl1c_phy_init(struct atl1c_hw *hw)
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
+ if (hw->nic_type == athr_mt) {
+ hw->phy_configured = true;
+ return 0;
+ }
+
if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
(atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
dev_err(&pdev->dev, "Error get phy ID\n");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 41f7f078cd27..db74241935ab 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1640,7 +1640,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
- reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
+ EXT_ENERGY_DET_MASK);
if (GENET_IS_V5(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
@@ -3237,15 +3238,21 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
+ unsigned int i;
u32 reg;
u32 dma_ctrl;
/* disable DMA */
dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
@@ -3292,7 +3299,6 @@ static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned long dma_ctrl;
- u32 reg;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3318,12 +3324,6 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (priv->internal_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@@ -4139,7 +4139,6 @@ static int bcmgenet_resume(struct device *d)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
unsigned long dma_ctrl;
- u32 reg;
int ret;
if (!netif_running(dev))
@@ -4176,12 +4175,6 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- if (priv->internal_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
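The reworked bcmgenet_dma_disable() now clears the buffer-enable bit of every TX and RX ring, not only the default descriptor ring, before flushing. A standalone sketch of the mask construction; the shift and index values are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define DMA_RING_BUF_EN_SHIFT	1	/* assumed value */
#define DESC_INDEX		16	/* assumed index of the default ring */
#define DMA_EN			(1u << 0)

static uint32_t ring_disable_mask(unsigned int nqueues)
{
	uint32_t mask = (1u << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)) | DMA_EN;
	unsigned int i;

	for (i = 0; i < nqueues; i++)
		mask |= 1u << (i + DMA_RING_BUF_EN_SHIFT);

	return mask;	/* caller applies it as: reg &= ~mask */
}

int main(void)
{
	printf("%08x\n", ring_disable_mask(4));	/* 0002001f */
	return 0;
}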
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index facde824bcaa..e31a5a397f11 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- if (priv->hw_params->flags & GENET_HAS_EXT) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg &= ~EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 9a2b166d651e..dbf9a0e6601d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap)
{
unsigned int i;
+ if (!is_uld(adap))
+ return;
+
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
@@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
- if (is_uld(adapter)) {
- detach_ulds(adapter);
- t4_uld_clean_up(adapter);
- }
+ detach_ulds(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ unregister_netdev(adapter->port[i]);
+
+ t4_uld_clean_up(adapter);
adap_free_hma_mem(adapter);
@@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev)
cxgb4_free_mps_ref_entries(adapter);
- for_each_port(adapter, i)
- if (adapter->port[i]->reg_state == NETREG_REGISTERED)
- unregister_netdev(adapter->port[i]);
-
debugfs_remove_recursive(adapter->debugfs_root);
if (!is_t4(adapter->params.chip))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 743af9e654aa..17faac715882 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap)
{
unsigned int i;
+ if (!is_uld(adap))
+ return;
+
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++) {
if (!adap->uld[i].handle)
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 867e87af3432..099a2bc5ae67 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1469,7 +1469,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err)
- return -ENXIO;
+ return err;
err = pci_request_regions(pdev, "gvnic-cfg");
if (err)
@@ -1477,19 +1477,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
goto abort_with_pci_region;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev,
- "Failed to set consistent dma mask: err=%d\n", err);
- goto abort_with_pci_region;
- }
-
reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
if (!reg_bar) {
dev_err(&pdev->dev, "Failed to map pci bar!\n");
@@ -1512,6 +1505,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
if (!dev) {
dev_err(&pdev->dev, "could not allocate netdev\n");
+ err = -ENOMEM;
goto abort_with_db_bar;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1565,7 +1559,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(dev);
if (err)
- goto abort_with_wq;
+ goto abort_with_gve_init;
dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
@@ -1573,6 +1567,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
queue_work(priv->gve_wq, &priv->service_task);
return 0;
+abort_with_gve_init:
+ gve_teardown_priv_resources(priv);
+
abort_with_wq:
destroy_workqueue(priv->gve_wq);
@@ -1590,7 +1587,7 @@ abort_with_pci_region:
abort_with_enabled:
pci_disable_device(pdev);
- return -ENXIO;
+ return err;
}
static void gve_remove(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 77bb8227f89b..8500621b2cd4 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -566,13 +566,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- /* Prefetch the payload header. */
- prefetch((char *)buf_state->addr + buf_state->page_info.page_offset);
-#if L1_CACHE_BYTES < 128
- prefetch((char *)buf_state->addr + buf_state->page_info.page_offset +
- L1_CACHE_BYTES);
-#endif
-
if (eop && buf_len <= priv->rx_copybreak) {
rx->skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len, 0);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 374a75d4faea..ed77191d19f4 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2420,9 +2420,10 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
- struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
bool saved_state = false;
+ struct ibmvnic_rwi *tmprwi;
+ struct ibmvnic_rwi *rwi;
unsigned long flags;
u32 reset_state;
int rc = 0;
@@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work)
} else {
rc = do_reset(adapter, rwi, reset_state);
}
- kfree(rwi);
+ tmprwi = rwi;
adapter->last_reset_time = jiffies;
if (rc)
@@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
+ /*
+ * If there is another reset queued, free the previous rwi
+ * and process the new reset even if previous reset failed
+ * (the previous reset could have failed because of a fail
+ * over for instance, so process the fail over).
+ *
+ * If there are no resets queued and the previous reset failed,
+ * the adapter would be in an undefined state. So retry the
+ * previous reset as a hard reset.
+ */
+ if (rwi)
+ kfree(tmprwi);
+ else if (rc)
+ rwi = tmprwi;
+
if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
- rwi->reset_reason == VNIC_RESET_MOBILITY))
+ rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
adapter->force_reset_recovery = true;
}
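
The rwi handoff above frees the just-processed reset work item only once it is known whether another reset is queued; if nothing is queued and the reset failed, the same item is retried as a hard reset. A minimal standalone sketch of that pattern, with made-up types standing in for the driver's structures:

#include <stdio.h>
#include <stdlib.h>

struct rwi { const char *reason; };

static struct rwi *reset_queue[4];
static int qhead, qtail;

static struct rwi *get_next_rwi(void)
{
	return qhead == qtail ? NULL : reset_queue[qhead++ & 3];
}

static int do_reset(struct rwi *rwi)
{
	printf("reset: %s\n", rwi->reason);
	return -1;		/* pretend the reset failed */
}

int main(void)
{
	struct rwi *rwi = malloc(sizeof(*rwi));
	struct rwi *tmprwi;
	int rc;

	if (!rwi)
		return 1;
	rwi->reason = "failover";
	rc = do_reset(rwi);
	tmprwi = rwi;		/* keep it until the queue is checked */
	rwi = get_next_rwi();

	if (rwi)		/* newer reset queued: drop the old rwi */
		free(tmprwi);
	else if (rc)		/* nothing queued, reset failed: retry it */
		rwi = tmprwi;

	if (rwi)
		printf("next reset: %s\n", rwi->reason);
	free(rwi);
	return 0;
}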
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d150dade06cf..757a54c39eef 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7664,6 +7664,7 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index dbcae92bb18d..adfa2768f024 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2227,6 +2227,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index e612c24fa384..44bafedd09f2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3798,6 +3798,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 7e6435dc7e80..171a7a629b20 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
**/
static int igb_request_msix(struct igb_adapter *adapter)
{
+ unsigned int num_q_vectors = adapter->num_q_vectors;
struct net_device *netdev = adapter->netdev;
int i, err = 0, vector = 0, free_vector = 0;
@@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter)
if (err)
goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (num_q_vectors > MAX_Q_VECTORS) {
+ num_q_vectors = MAX_Q_VECTORS;
+ dev_warn(&adapter->pdev->dev,
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+ adapter->num_q_vectors, MAX_Q_VECTORS);
+ }
+ for (i = 0; i < num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
vector++;
@@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter)
**/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
- struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
+ ring = adapter->tx_ring[queue];
/* If any of the Qav features is enabled, configure queues as SR and
* with HIGH PRIO. If none is, then configure them with LOW PRIO and
@@ -3615,6 +3623,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 9e0bbb2e55e3..5901ed9fb545 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -578,7 +578,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
- return 0;
+ return -EOPNOTSUPP;
}
void igc_reinit_locked(struct igc_adapter *);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 95323095094d..e29aadbc6744 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -232,6 +232,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
@@ -6054,6 +6056,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ffff69efd78a..913253f8ecb4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -11067,6 +11067,7 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index caaea2c920a6..e3e4676af9e4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
**/
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
- struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct net_device *dev = xs->xso.real_dev;
+ struct ixgbevf_adapter *adapter;
+ struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
int ret;
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
xs->id.proto);
@@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
**/
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
- struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct net_device *dev = xs->xso.real_dev;
+ struct ixgbevf_adapter *adapter;
+ struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 361bc4fbe20b..76a7777c746d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2299,19 +2299,19 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
-
- /* last fragment */
- if (len == *size) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = xdp_sinfo->nr_frags;
- memcpy(sinfo->frags, xdp_sinfo->frags,
- sinfo->nr_frags * sizeof(skb_frag_t));
- }
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
+
+ /* last fragment */
+ if (len == *size) {
+ struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ sinfo->nr_frags = xdp_sinfo->nr_frags;
+ memcpy(sinfo->frags, xdp_sinfo->frags,
+ sinfo->nr_frags * sizeof(skb_frag_t));
+ }
*size -= len;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index fac6474ad694..9169849881bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id)
return test_bit(lmac_id, &cgx->lmac_bmap);
}
+/* Helper function to get the sequential index of an enabled
+ * LMAC within a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+ int tmp, id = 0;
+
+ for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ if (tmp == lmac_id)
+ break;
+ id++;
+ }
+
+ return id;
+}
+
struct mac_ops *get_mac_ops(void *cgxd)
{
if (!cgxd)
@@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr)
return mac;
}
+static void cfg2mac(u64 cfg, u8 *mac_addr)
+{
+ int i, index = 0;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--, index++)
+ mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
+}
+
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index, id;
u64 cfg;
+ /* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
+
/* copy 6bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
cfg = mac2u64 (mac_addr);
- cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx = cgxd;
+
+ if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+ return 0;
+
+ /* Get mac_ops to know csr offset */
+ mac_ops = cgx->mac_ops;
+
+ return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx;
+
+ if (!cgxd)
+ return 0;
+
+ cgx = cgxd;
+ mac_ops = cgx->mac_ops;
+ return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, idx;
+ u64 cfg = 0;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + idx;
+
+ cfg = mac2u64(mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+ if (is_multicast_ether_addr(mac_addr)) {
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE_CAM;
+ lmac->mcast_filters_count++;
+ } else if (!lmac->mcast_filters_count) {
+ cfg |= CGX_DMAC_MCAST_MODE;
+ }
+
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 index = 0, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+/* Allows the caller to change the MAC address associated with an index
+ * in the DMAC filter table, including index 0, which is reserved for
+ * the interface MAC address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
+ struct lmac *lmac;
+ u64 cfg;
+ int id;
+
+ lmac = lmac_pdata(lmac_id, cgx_dev);
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* ensure index is already set */
+ if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+ return -EINVAL;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+ cfg &= ~CGX_RX_DMAC_ADR_MASK;
+ cfg |= mac2u64(mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+ return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 mac[ETH_ALEN];
+ u64 cfg;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ /* Read MAC address to check whether it is ucast or mcast */
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+ cfg2mac(cfg, mac);
+ if (is_multicast_ether_addr(mac))
+ lmac->mcast_filters_count--;
+
+ if (!lmac->mcast_filters_count) {
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ }
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index;
u64 cfg;
+ int id;
mac_ops = cgx_dev->mac_ops;
- cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
@@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx);
+ u16 max_dmac = lmac->mac_to_index_bmap.max;
struct mac_ops *mac_ops;
+ int index, i;
u64 cfg = 0;
+ int id;
if (!cgx)
return;
+ id = get_sequence_id_of_lmac(cgx, lmac_id);
+
mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
- cfg |= CGX_DMAC_BCAST_MODE;
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+ }
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 +
+ index * 0x8),
+ cfg);
+ }
+ }
}
}
@@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx)
}
lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ return err;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
continue;
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+ kfree(lmac->mac_to_index_bmap.bmap);
kfree(lmac->name);
kfree(lmac);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 12521262164a..237ba2b56210 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -23,6 +23,7 @@
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
+#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
@@ -46,10 +47,12 @@
#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
@@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
unsigned long cgx_get_lmac_bmap(void *cgxd);
void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
#endif /* CGX_H */
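
Every helper above uses the same indexing scheme: the 32 DMAC CAM entries of a CGX are split evenly across its enabled LMACs, and an entry's absolute CAM index is the LMAC's sequential id times that per-LMAC share, plus the slot allocated from mac_to_index_bmap. A small standalone sketch of the arithmetic (the constant mirrors cgx.h; the rest is illustrative):

#include <stdio.h>

#define MAX_DMAC_ENTRIES_PER_CGX 32

/* Absolute CAM index for slot 'idx' of the LMAC with sequential id
 * 'seq_id', when 'lmac_count' LMACs are enabled on this CGX.
 */
static int dmac_cam_index(int seq_id, int idx, int lmac_count)
{
	int per_lmac = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;

	return seq_id * per_lmac + idx;
}

int main(void)
{
	/* 4 enabled LMACs -> 8 entries each; LMAC #2, slot 3 -> index 19 */
	printf("%d\n", dmac_cam_index(2, 3, 4));
	/* slot 0 is the reserved per-LMAC interface address */
	printf("%d\n", dmac_cam_index(2, 0, 4));
	return 0;
}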
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 45706fd87120..a8b7b1c7a1d5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -10,17 +10,19 @@
#include "rvu.h"
#include "cgx.h"
/**
- * struct lmac
+ * struct lmac - per lmac locks and properties
* @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
* @cmd_lock: Lock to serialize the command interface
* @resp: command response
* @link_info: link related information
+ * @mac_to_index_bmap: Mac address to CGX table index mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
- * @cmd_pend: flag set before new command is started
- * flag cleared after command response is received
* @cgx: parent cgx port
+ * @mcast_filters_count: Number of multicast filters installed
* @lmac_id: lmac port id
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
* @name: lmac port name
*/
struct lmac {
@@ -29,12 +31,14 @@ struct lmac {
struct mutex cmd_lock;
u64 resp;
struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
- bool cmd_pend;
struct cgx *cgx;
+ u8 mcast_filters_count;
u8 lmac_id;
+ bool cmd_pend;
char *name;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 770d86262838..f5ec39de026a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -134,6 +134,8 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
+ msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@@ -163,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
- /* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+ msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -401,6 +411,38 @@ struct cgx_mac_addr_set_or_get {
u8 mac_addr[ETH_ALEN];
};
+/* Structure for requesting the operation to
+ * add a DMAC filter entry to a CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for the response to the request to
+ * add a DMAC filter entry to a CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for requesting the operation to
+ * delete a DMAC filter entry from a CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for the response to the request for the
+ * maximum number of supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 max_dmac_filters;
+};
+
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
@@ -499,6 +541,12 @@ struct cgx_set_link_mode_rsp {
int status;
};
+struct cgx_mac_addr_update_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u8 index;
+};
+
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
#define RVU_MAC_VERSION BIT_ULL(2)
@@ -1278,6 +1326,14 @@ struct set_vf_perm {
u64 flags;
};
+struct lmtst_tbl_setup_req {
+ struct mbox_msghdr hdr;
+ u16 base_pcifunc;
+ u8 use_local_lmt_region;
+ u64 lmt_iova;
+ u64 rsvd[4];
+};
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 0b092949d7ac..10cddf1ac7b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2333,6 +2333,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
mutex_unlock(&rvu->flr_lock);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 9e5d9ba6f01e..10e58a5d5861 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -243,6 +243,7 @@ struct rvu_pfvf {
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
unsigned long flags;
};
@@ -656,6 +657,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
+
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
@@ -741,6 +744,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
@@ -754,6 +758,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
+/* CN10K RVU - LMT*/
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 6e2bf4fcd29c..6cc8fbb7190c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
unsigned long pfmap;
@@ -454,6 +454,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
+ max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+ /* cgx_lmac_addr_del() does not clear the entry for index 0,
+ * so it needs to be done explicitly
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -557,6 +582,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp
+ *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* If the msg is received from PFs that are not mapped to CGX LMACs,
+ * or from a VF, then no entries are allocated for DMAC filters at
+ * the CGX level. So return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
+
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
@@ -953,3 +1035,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
return 0;
}
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7d9e71c6965f..8d48b64485c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -10,6 +10,206 @@
#include "cgx.h"
#include "rvu_reg.h"
+/* RVU LMTST */
+#define LMT_TBL_OP_READ 0
+#define LMT_TBL_OP_WRITE 1
+#define LMT_MAP_TABLE_SIZE (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE 16
+
+/* Function to perform operations (read/write) on lmtst map table */
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ int lmt_tbl_op)
+{
+ void __iomem *lmt_map_base;
+ u64 tbl_base;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ return -ENOMEM;
+ }
+
+ if (lmt_tbl_op == LMT_TBL_OP_READ) {
+ *val = readq(lmt_map_base + index);
+ } else {
+ writeq((*val), (lmt_map_base + index));
+ /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
+ * changes effective. Write 1 for flush and read is being used as a
+ * barrier and sets up a data dependency. Write to 0 after a write
+ * to 1 to complete the flush.
+ */
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+ rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+ }
+
+ iounmap(lmt_map_base);
+ return 0;
+}
+
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+ return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
+
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+{
+ u64 pa, val, pf;
+ int err;
+
+ if (!iova) {
+ dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
+ return -EINVAL;
+ }
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
+ return err;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
+ return -EIO;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21]
+ * PA[11:0] = IOVA[11:0]
+ */
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+
+ return 0;
+}
+
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err = 0;
+ u64 val;
+
+ /* Read the current lmt addr of pcifunc */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+
+ /* Store the secondary's lmt base address, as this needs to be
+ * reverted during FLR. Also make sure this default value doesn't
+ * get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_base_addr)
+ pfvf->lmt_base_addr = val;
+
+ /* Update the LMT table with new addr */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+ struct lmtst_tbl_setup_req *req,
+ struct msg_rsp *rsp)
+{
+ u64 lmt_addr, val;
+ u32 pri_tbl_idx;
+ int err = 0;
+
+ /* Check if the PF_FUNC wants to use its own local memory as the
+ * LMTLINE region; if so, convert that IOVA to a physical address
+ * and populate the LMT table with that address
+ */
+ if (req->use_local_lmt_region) {
+ err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+ req->lmt_iova, &lmt_addr);
+ if (err < 0)
+ return err;
+
+ /* Update the lmt addr for this PFFUNC in the LMT table */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+ if (err)
+ return err;
+ }
+
+ /* Reconfigure the lmtst map table in LMT region shared mode, i.e.
+ * make multiple PF_FUNCs share an LMTLINE region. The primary/base
+ * pcifunc (passed as an argument in the mailbox request) is the one
+ * whose lmt base address will be shared with the secondary pcifunc
+ * (the one calling this mailbox).
+ */
+ if (req->base_pcifunc) {
+ /* Calculating the LMT table index equivalent to primary
+ * pcifunc.
+ */
+ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+ /* Read the base lmt addr of the primary pcifunc */
+ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+ LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ pri_tbl_idx, err);
+ return err;
+ }
+
+ /* Update the base lmt addr of secondary with primary's base
+ * lmt addr.
+ */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Resetting the lmtst map table to original base addresses */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ if (pfvf->lmt_base_addr) {
+ /* This corresponds to lmt map table index */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ /* Revert to the original lmt base addr for the respective
+ * pcifunc.
+ */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+}
+
int rvu_set_channels_base(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
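
Two pieces of arithmetic in the LMTST code above can be checked in isolation: the per-pcifunc map table index (16-byte entries, PF slots strided by the total VF count) and the physical address rebuilt from RVU_AF_SMMU_TLN_FLIT1. A standalone sketch, assuming the bit layout stated in the comments above; the inputs are made up:

#include <stdint.h>
#include <stdio.h>

#define LMT_MAPTBL_ENTRY_SIZE 16

/* Byte offset of a pcifunc's entry in the LMT map table */
static uint32_t lmtst_tbl_index(uint32_t pf, uint32_t func, uint32_t total_vfs)
{
	return (pf * total_vfs + func) * LMT_MAPTBL_ENTRY_SIZE;
}

/* PA[51:12] = FLIT1[60:21], PA[11:0] = IOVA[11:0] */
static uint64_t lmt_addr_from_flit1(uint64_t flit1, uint64_t iova)
{
	uint64_t pa = (flit1 >> 21) & ((1ULL << 40) - 1);

	return (pa << 12) | (iova & 0xFFF);
}

int main(void)
{
	printf("tbl index: 0x%x\n", lmtst_tbl_index(3, 2, 128));
	printf("lmt addr: 0x%llx\n",
	       (unsigned long long)lmt_addr_from_flit1(0x12345ULL << 21,
						       0xabc123));
	return 0;
}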
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 3cc3c6fd1d84..370d4ca1e5ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
struct dentry *current_dir;
- int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
@@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
if (!buf)
return -EINVAL;
- err = kstrtoint(buf + 1, 10, &lmac_id);
- if (!err) {
- err = cgx_print_stats(filp, lmac_id);
- if (err)
- return err;
- }
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+ for (index = 0 ; index < 32 ; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
struct mac_ops *mac_ops;
@@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
}
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d6f8210652c5..aeae37704428 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -346,6 +346,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
/* Free and disable any MCAM entries used by this NIX LF */
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 76837d5e19c6..8b01ef6e2c99 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -49,6 +49,11 @@
#define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4)
#define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4)
#define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ (0x6000)
+#define RVU_AF_SMMU_TXN_REQ (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT1 (0x6030)
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@@ -692,4 +697,9 @@
#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+/* APR */
+#define APR_AF_LMT_CFG (0x000ull)
+#define APR_AF_LMT_MAP_BASE (0x008ull)
+#define APR_AF_LMT_CTL (0x010ull)
+
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 14aa8e37ea41..5bbe6727d11d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -35,7 +35,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NPA0 = 0xeULL,
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
- BLK_COUNT = 0x12ULL,
+ BLKADDR_APR = 0x16ULL,
+ BLK_COUNT = 0x17ULL,
};
/* RVU Block Type Enumeration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 457c94793e63..3254b02205ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o
+ otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o
rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 1b08896b46d2..184de9466286 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = {
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
};
-int cn10k_pf_lmtst_init(struct otx2_nic *pf)
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
- int size, num_lines;
- u64 base;
- if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
- pf->hw_ops = &otx2_hw_ops;
+ struct lmtst_tbl_setup_req *req;
+ int qcount, err;
+
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
return 0;
}
- pf->hw_ops = &cn10k_hw_ops;
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- pf->hw.lmt_base = ioremap(base, size);
+ pfvf->hw_ops = &cn10k_hw_ops;
+ qcount = pfvf->hw.max_queues;
+ /* LMTST lines allocation
+ * qcount = num_online_cpus();
+ * NPA = TX + RX + XDP.
+ * NIX = TX * 32 (For Burst SQE flush).
+ */
+ pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
+ pfvf->npa_lmt_lines = qcount * 3;
+ pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
- if (!pf->hw.lmt_base) {
- dev_err(pf->dev, "Unable to map PF LMTST region\n");
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
- /* FIXME: Get the num of LMTST lines from LMT table */
- pf->tot_lmt_lines = size / LMT_LINE_SIZE;
- num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
- pf->hw.tx_queues;
- /* Number of LMT lines per SQ queues */
- pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
-
- pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
- return 0;
-}
+ req->use_local_lmt_region = true;
-int cn10k_vf_lmtst_init(struct otx2_nic *vf)
-{
- int size, num_lines;
-
- if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
- vf->hw_ops = &otx2_hw_ops;
- return 0;
+ err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+ LMT_LINE_SIZE);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
}
+ pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+ req->lmt_iova = (u64)pfvf->dync_lmt->iova;
- vf->hw_ops = &cn10k_hw_ops;
- size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
- vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
- PCI_MBOX_BAR_NUM),
- size);
- if (!vf->hw.lmt_base) {
- dev_err(vf->dev, "Unable to map VF LMTST region\n");
- return -ENOMEM;
- }
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
- vf->tot_lmt_lines = size / LMT_LINE_SIZE;
- /* LMTST lines per SQ */
- num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
- vf->hw.tx_queues;
- vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
- vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
-EXPORT_SYMBOL(cn10k_vf_lmtst_init);
+EXPORT_SYMBOL(cn10k_lmtst_init);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
@@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
struct otx2_snd_queue *sq;
sq = &pfvf->qset.sq[qidx];
- sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
+ sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
(qidx * pfvf->nix_lmt_size));
+ sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
+
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!aq)
@@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
- struct otx2_nic *pfvf = dev;
- int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
u64 val = 0, tar_addr = 0;
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
- val = (lmt_id & 0x7FF);
+ val = (sq->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index 71292a4cf1f3..1a1ae334477d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -12,8 +12,7 @@
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_pf_lmtst_init(struct otx2_nic *pf);
-int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
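
The new LMTST sizing in cn10k_lmtst_init() is plain arithmetic: three NPA lines per queue (TX + RX + XDP) plus LMT_BURST_SIZE NIX lines per queue, with each SQ's LMT id starting right after the NPA block and truncated to 11 bits when flushed. A quick standalone check (LMT_BURST_SIZE mirrors the header; the queue count is made up):

#include <stdio.h>

#define LMT_BURST_SIZE 32	/* 32 LMTST lines per SQ for burst SQE flush */

int main(void)
{
	int qcount = 8;		/* hypothetical pfvf->hw.max_queues */
	int npa_lmt_lines = qcount * 3;
	int tot_lmt_lines = qcount * 3 + qcount * LMT_BURST_SIZE;
	int qidx;

	printf("NPA lines %d, total lines %d\n", npa_lmt_lines, tot_lmt_lines);
	for (qidx = 0; qidx < 3; qidx++) {
		int lmt_id = npa_lmt_lines + qidx * LMT_BURST_SIZE;

		printf("sq%d lmt_id=%d val=0x%x\n", qidx, lmt_id,
		       lmt_id & 0x7FF);
	}
	return 0;
}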
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index cf7875d51d87..7cccd802c4ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
/* update dmac field in vlan offload rule */
if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
otx2_install_rxvlan_offload_flow(pfvf);
+ /* update dmac address in ntuple and DMAC filter list */
+ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_update_pfmac_flow(pfvf);
} else {
return -EPERM;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 234b330f3183..8fd58cd07f50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -218,8 +218,8 @@ struct otx2_hw {
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
-#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
- void __iomem *lmt_base;
+#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
+ u64 *lmt_base;
u64 *npa_lmt_base;
u64 *nix_lmt_base;
};
@@ -288,6 +288,9 @@ struct otx2_flow_config {
u16 tc_flower_offset;
u16 ntuple_max_flows;
u16 tc_max_flows;
+ u8 dmacflt_max_flows;
+ u8 *bmap_to_dmacindex;
+ unsigned long dmacflt_bmap;
struct list_head flow_list;
};
@@ -329,6 +332,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
struct otx2_qset qset;
@@ -363,8 +367,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
/* LMTST Lines info */
+ struct qmem *dync_lmt;
u16 tot_lmt_lines;
- u16 nix_lmt_lines;
+ u16 npa_lmt_lines;
u32 nix_lmt_size;
struct otx2_ptp *ptp;
@@ -833,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644
index 000000000000..383a6b5cb698
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+ u8 *dmac_index)
+{
+ struct cgx_mac_addr_add_req *req;
+ struct cgx_mac_addr_add_rsp *rsp;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!err) {
+ rsp = (struct cgx_mac_addr_add_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ *dmac_index = rsp->index;
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+{
+ struct cgx_mac_addr_set_or_get *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+{
+ u8 *dmacindex;
+
+ /* Store the dmacindex returned by the CGX/RPM driver, which will
+ * be used for macaddr update/remove
+ */
+ dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_add_pfmac(pf);
+ else
+ return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+ u8 dmac_index)
+{
+ struct cgx_mac_addr_del_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->index = dmac_index;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return err;
+}
+
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
+ u8 bit_pos)
+{
+ u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_remove_pfmac(pf);
+ else
+ return otx2_dmacflt_do_remove(pf, mac, dmacindex);
+}
+
+/* CGX/RPM blocks support a maximum of 32 unicast entries.
+ * In a typical configuration the MAC block is associated
+ * with 4 LMACs, so each LMAC gets 8 DMAC entries.
+ */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
+{
+ struct cgx_max_dmac_entries_get_rsp *rsp;
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_max_dmac_entries_get_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+ pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
+
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+{
+ struct cgx_mac_addr_update_req *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
+
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
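Every helper added in otx2_dmac_flt.c follows one locked mailbox transaction shape: take the mbox mutex, allocate a request, fill it, sync with firmware, optionally read the response, and release the mutex on every exit path. The sketch below models that shape in plain user-space C under a pthread mutex; mbox_alloc(), mbox_sync() and mbox_rsp() are hypothetical stand-ins, not the kernel's otx2 mailbox API.

    #include <pthread.h>
    #include <errno.h>

    struct mbox { pthread_mutex_t lock; };
    struct req  { unsigned char mac[6]; };
    struct rsp  { unsigned char index; };

    /* Hypothetical stand-ins for the firmware mailbox primitives. */
    static struct req *mbox_alloc(struct mbox *mb) { static struct req r; (void)mb; return &r; }
    static int mbox_sync(struct mbox *mb) { (void)mb; return 0; }
    static struct rsp *mbox_rsp(struct mbox *mb) { static struct rsp r; (void)mb; return &r; }

    /* The transaction shape used by otx2_dmacflt_do_add() and friends:
     * lock -> alloc -> fill -> sync -> (read response) -> unlock.
     */
    static int dmacflt_add_model(struct mbox *mb, const unsigned char *mac,
                                 unsigned char *dmac_index)
    {
            struct req *req;
            int err;

            pthread_mutex_lock(&mb->lock);
            req = mbox_alloc(mb);
            if (!req) {
                    pthread_mutex_unlock(&mb->lock);
                    return -ENOMEM;          /* bail out with the lock released */
            }
            for (int i = 0; i < 6; i++)
                    req->mac[i] = mac[i];
            err = mbox_sync(mb);
            if (!err)
                    *dmac_index = mbox_rsp(mb)->index;
            pthread_mutex_unlock(&mb->lock);
            return err;
    }

    int main(void)
    {
            struct mbox mb = { PTHREAD_MUTEX_INITIALIZER };
            unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 }, idx;
            return dmacflt_add_model(&mb, mac, &idx);
    }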
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 8c97106bdd1c..4d9de525802d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -18,6 +18,12 @@ struct otx2_flow {
bool is_vf;
u8 rss_ctx_id;
int vf;
+ bool dmac_filter;
+};
+
+enum dmac_req {
+ DMAC_ADDR_UPDATE,
+ DMAC_ADDR_DEL
};
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
@@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->mac_table)
return -ENOMEM;
+ otx2_dmacflt_get_max_cnt(pf);
+
+ /* DMAC filters are not allocated */
+ if (!pf->flow_cfg->dmacflt_max_flows)
+ return 0;
+
+ pf->flow_cfg->bmap_to_dmacindex =
+ devm_kzalloc(pf->dev, sizeof(u8) *
+ pf->flow_cfg->dmacflt_max_flows,
+ GFP_KERNEL);
+
+ if (!pf->flow_cfg->bmap_to_dmacindex)
+ return -ENOMEM;
+
+ pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
return 0;
}
@@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
+ if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(netdev,
+ "Add %pM to CGX/RPM DMAC filters list as well\n",
+ mac);
+
return otx2_do_add_macfilter(pf, mac);
}
@@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
list_add(&flow->list, head);
}
+static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+ if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
+ bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows))
+ return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
+ else
+ return flow_cfg->ntuple_max_flows;
+}
+
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
u32 location)
{
struct otx2_flow *iter;
- if (location >= pfvf->flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(pfvf->flow_cfg))
return -EINVAL;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
@@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
int idx = 0;
int err = 0;
- nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+ nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
while ((!err || err == -ENOENT) && idx < rule_cnt) {
err = otx2_get_flow(pfvf, nfc, location);
if (!err)
@@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return 0;
}
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ u64 ring_cookie = fsp->ring_cookie;
+ u32 flow_type;
+
+ if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+ return false;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+ /* CGX/RPM block DMAC filtering is configured for white listing;
+ * check for any action other than DROP.
+ */
+ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+ !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+ if (is_zero_ether_addr(eth_mask->h_dest) &&
+ is_valid_ether_addr(eth_hdr->h_dest))
+ return true;
+ }
+
+ return false;
+}
+
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
@@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+ struct otx2_flow *flow)
+{
+ struct otx2_flow *pf_mac;
+ struct ethhdr *eth_hdr;
+
+ pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+ if (!pf_mac)
+ return -ENOMEM;
+
+ pf_mac->entry = 0;
+ pf_mac->dmac_filter = true;
+ pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
+ memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ pf_mac->flow_spec.location = pf_mac->location;
+
+ /* Copy PF mac address */
+ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+ ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+ /* Install DMAC filter with PF mac address */
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+ otx2_add_flow_to_list(pfvf, pf_mac);
+ pfvf->flow_cfg->nr_flows++;
+ set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+
+ return 0;
+}
+
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
+ struct ethhdr *eth_hdr;
bool new = false;
+ int err = 0;
u32 ring;
- int err;
ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
@@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (fsp->location >= flow_cfg->ntuple_max_flows)
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, fsp->location);
if (!flow) {
- flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (!flow)
return -ENOMEM;
flow->location = fsp->location;
- flow->entry = flow_cfg->flow_ent[flow->location];
new = true;
}
/* struct copy */
@@ -852,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (fsp->flow_type & FLOW_RSS)
flow->rss_ctx_id = nfc->rss_context;
- err = otx2_add_flow_msg(pfvf, flow);
+ if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+ eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Sync dmac filter table with updated fields */
+ if (flow->dmac_filter)
+ return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+ flow->entry);
+
+ if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows)) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert the rule %d as max allowed dmac filters are %d\n",
+ flow->location +
+ flow_cfg->dmacflt_max_flows,
+ flow_cfg->dmacflt_max_flows);
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* Install PF mac address to DMAC filter list */
+ if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ otx2_add_flow_with_pfmac(pfvf, flow);
+
+ flow->dmac_filter = true;
+ flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows);
+ fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
+ flow->flow_spec.location = fsp->location;
+ flow->location = fsp->location;
+
+ set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+ } else {
+ if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
+ flow->location,
+ flow_cfg->ntuple_max_flows - 1);
+ err = -EINVAL;
+ } else {
+ flow->entry = flow_cfg->flow_ent[flow->location];
+ err = otx2_add_flow_msg(pfvf, flow);
+ }
+ }
+
if (err) {
if (new)
kfree(flow);
@@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
return err;
}
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+ bool found = false;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter && iter->entry == 0) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ if (req == DMAC_ADDR_DEL) {
+ otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ 0);
+ clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ found = true;
+ } else {
+ ether_addr_copy(eth_hdr->h_dest,
+ pfvf->netdev->dev_addr);
+ otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+ }
+ break;
+ }
+ }
+
+ if (found) {
+ list_del(&iter->list);
+ kfree(iter);
+ pfvf->flow_cfg->nr_flows--;
+ }
+}
+
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct otx2_flow *flow;
int err;
- if (location >= flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, location);
if (!flow)
return -ENOENT;
- err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ if (flow->dmac_filter) {
+ struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* The user is not allowed to remove a dmac filter for the interface MAC */
+ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+ return -EPERM;
+
+ err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ flow->entry);
+ clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ /* If all dmac filters are removed, delete the macfilter with
+ * the interface MAC address and configure the CGX/RPM block
+ * in promiscuous mode.
+ */
+ if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows) == 1)
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+ } else {
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ }
+
if (err)
return err;
@@ -1100,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
mutex_unlock(&pf->mbox.lock);
return rsp_hdr->rc;
}
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+
+ list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ otx2_dmacflt_add(pf, eth_hdr->h_dest,
+ iter->entry);
+ }
+ }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}
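The new DMAC-filter plumbing in otx2_flows.c hands out hardware slots from dmacflt_bmap: bit 0 is claimed for the PF MAC by otx2_add_flow_with_pfmac(), user rules take find_first_zero_bit(), and otx2_remove_flow() returns the bit with clear_bit(). A minimal user-space model of that allocator, assuming a single word of up to 32 slots (the kernel side uses the bitmap API on an unsigned long):

    #include <stdio.h>

    /* Model of the dmacflt_bmap allocator: bit 0 is reserved for the
     * PF MAC filter; user rules take the first free bit above it.
     * Pure sketch, not the kernel bitmap API.
     */
    static unsigned long bmap;           /* one bit per hardware DMAC slot */
    #define MAX_SLOTS 32

    static int slot_alloc(void)
    {
            for (int bit = 0; bit < MAX_SLOTS; bit++)
                    if (!(bmap & (1UL << bit))) {
                            bmap |= 1UL << bit;   /* set_bit()           */
                            return bit;           /* becomes flow->entry */
                    }
            return -1;                            /* bitmap_full() case  */
    }

    static void slot_free(int bit)
    {
            bmap &= ~(1UL << bit);                /* clear_bit()         */
    }

    int main(void)
    {
            bmap |= 1UL;                 /* slot 0: PF MAC, installed first */
            int a = slot_alloc();        /* first user rule -> slot 1       */
            int b = slot_alloc();        /* second user rule -> slot 2      */
            printf("%d %d\n", a, b);
            slot_free(a);
            printf("%d\n", slot_alloc()); /* slot 1 is reused               */
            return 0;
    }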
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 59912f73417b..f300b807a85b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
+ if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(pf->netdev,
+ "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+
mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
@@ -1533,10 +1538,10 @@ int otx2_open(struct net_device *netdev)
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
/* Reserve LMT lines for NPA AURA batch free */
- pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
+ pf->hw.npa_lmt_base = pf->hw.lmt_base;
/* Reserve LMT lines for NIX TX */
- pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
- (NIX_LMTID_BASE * LMT_LINE_SIZE));
+ pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
+ (pf->npa_lmt_lines * LMT_LINE_SIZE));
}
err = otx2_init_hw_resources(pf);
@@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev)
/* Restore pause frame settings */
otx2_config_pause_frm(pf);
+ /* Install DMAC Filters */
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_tx_stop_queues;
@@ -2526,7 +2535,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_pf_lmtst_init(pf);
+ err = cn10k_lmtst_init(pf);
if (err)
goto err_detach_rsrc;
@@ -2630,8 +2639,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
@@ -2772,9 +2781,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
- if (pf->hw.lmt_base)
- iounmap(pf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
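The cn10k LMT rework in otx2_pf.c carves the reserved region by line count rather than the fixed NIX_LMTID_BASE: NPA lines come first, and the NIX TX partition starts npa_lmt_lines * LMT_LINE_SIZE bytes in. A pointer-arithmetic sketch over an ordinary buffer; the 128-byte line size matches the driver, the line counts are made up:

    #include <stdio.h>
    #include <stdint.h>

    #define LMT_LINE_SIZE 128

    int main(void)
    {
            static uint8_t lmt_region[64 * LMT_LINE_SIZE]; /* stand-in for lmt_base */
            unsigned int npa_lmt_lines = 8;                /* made-up partition      */

            uint8_t *npa_lmt_base = lmt_region;
            uint8_t *nix_lmt_base = npa_lmt_base + npa_lmt_lines * LMT_LINE_SIZE;

            /* NIX TX lines start right after the NPA partition. */
            printf("offset=%td\n", nix_lmt_base - lmt_region); /* prints 1024 */
            return 0;
    }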
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 905fc02a7dfe..972b202b9884 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -288,7 +288,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct otx2_nic *priv;
u32 burst, mark = 0;
u8 nr_police = 0;
- bool pps;
+ bool pps = false;
u64 rate;
int i;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 52486c1f0973..2f144e2cf436 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -83,6 +83,7 @@ struct otx2_snd_queue {
u16 num_sqbs;
u16 sqe_thresh;
u8 sqe_per_sqb;
+ u32 lmt_id;
u64 io_addr;
u64 *aura_fc_addr;
u64 *lmt_addr;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 13a908f75ba0..a8bee5aefec1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_vf_lmtst_init(vf);
+ err = cn10k_lmtst_init(vf);
if (err)
goto err_detach_rsrc;
@@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
-
- if (vf->hw.lmt_base)
- iounmap(vf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index a80419d8d4b5..ac403d43c74c 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -2,6 +2,7 @@ config SPARX5_SWITCH
tristate "Sparx5 switch driver"
depends on NET_SWITCHDEV
depends on HAS_IOMEM
+ depends on OF
select PHYLINK
select PHY_SPARX5_SERDES
select RESET_CONTROLLER
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 5249b64f4fc5..49def6934cad 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -540,10 +540,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
ret = register_netdev(ndev);
- if (ret) {
- free_netdev(ndev);
+ if (ret)
goto init_fail;
- }
netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
__func__, ndev->irq, ndev->dev_addr);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 3e89e34f86d5..e9d260d84bf3 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1298,6 +1298,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev,
}
static int ocelot_netdevice_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
@@ -1307,11 +1308,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- err = ocelot_netdevice_bridge_join(dev, dev,
+ err = ocelot_netdevice_bridge_join(dev, brport_dev,
info->upper_dev,
extack);
else
- err = ocelot_netdevice_bridge_leave(dev, dev,
+ err = ocelot_netdevice_bridge_leave(dev, brport_dev,
info->upper_dev);
}
if (netif_is_lag_master(info->upper_dev)) {
@@ -1346,7 +1347,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
if (ocelot_port->bond != dev)
return NOTIFY_OK;
- err = ocelot_netdevice_changeupper(lower, info);
+ err = ocelot_netdevice_changeupper(lower, dev, info);
if (err)
return notifier_from_errno(err);
}
@@ -1385,7 +1386,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info = ptr;
if (ocelot_netdevice_dev_check(dev))
- return ocelot_netdevice_changeupper(dev, info);
+ return ocelot_netdevice_changeupper(dev, dev, info);
if (netif_is_lag_master(dev))
return ocelot_netdevice_lag_changeupper(dev, info);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 273d529d43c2..062bb2db68bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1141,20 +1141,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
nfp_fl_ct_clean_flow_entry(ct_entry);
kfree(ct_map_ent);
- /* If this is the last pre_ct_rule it means that it is
- * very likely that the nft table will be cleaned up next,
- * as this happens on the removal of the last act_ct flow.
- * However we cannot deregister the callback on the removal
- * of the last nft flow as this runs into a deadlock situation.
- * So deregister the callback on removal of the last pre_ct flow
- * and remove any remaining nft flow entries. We also cannot
- * save this state and delete the callback later since the
- * nft table would already have been freed at that time.
- */
if (!zt->pre_ct_count) {
- nf_flow_table_offload_del_cb(zt->nft,
- nfp_fl_ct_handle_nft_flow,
- zt);
zt->nft = NULL;
nfp_fl_ct_clean_nft_entries(zt);
}
@@ -1172,6 +1159,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
nfp_ct_map_params);
nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
kfree(ct_map_ent);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 8543bf3c3484..ad655f0a4965 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -735,12 +735,13 @@ static int emac_remove(struct platform_device *pdev)
put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
- free_netdev(netdev);
if (adpt->phy.digital)
iounmap(adpt->phy.digital);
iounmap(adpt->phy.base);
+ free_netdev(netdev);
+
return 0;
}
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index a3ca406a3561..e5b0d795c301 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* maximum size.
*/
tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
+ tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
n_xdp_tx = num_possible_cpus();
n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
@@ -169,6 +170,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
n_xdp_ev, n_channels, max_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
@@ -176,12 +179,14 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
n_xdp_tx, n_channels, efx->max_vis);
+ netif_err(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
} else {
efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
+ efx->xdp_tx_per_channel = tx_per_ev;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
@@ -891,18 +896,20 @@ int efx_set_channels(struct efx_nic *efx)
if (efx_channel_is_xdp_tx(channel)) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
+
/* We may have a few left-over XDP TX
* queues owing to xdp_tx_queue_count
* not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
* We still allocate and probe those
* TXQs, but never use them.
*/
- if (xdp_queue_number < efx->xdp_tx_queue_count)
+ if (xdp_queue_number < efx->xdp_tx_queue_count) {
+ netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+ channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- xdp_queue_number++;
+ xdp_queue_number++;
+ }
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -914,8 +921,7 @@ int efx_set_channels(struct efx_nic *efx)
}
}
}
- if (xdp_queue_number)
- efx->xdp_tx_queue_count = xdp_queue_number;
+ WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
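The efx_channels.c change clamps tx_per_ev to EFX_MAX_TXQ_PER_CHANNEL before dividing the per-CPU XDP TX queues across event queues, so xdp_tx_per_channel can no longer claim more queues than an event queue actually carries. A quick arithmetic check of that sizing, with illustrative constants rather than real EF10/EF100 values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int evq_size = 4096, txq_max_ent = 512, max_txq_per_channel = 8;
            int tx_per_ev = evq_size / txq_max_ent;       /* 8 */
            if (tx_per_ev > max_txq_per_channel)
                    tx_per_ev = max_txq_per_channel;      /* the new min() clamp */

            int n_xdp_tx = 24;                            /* num_possible_cpus() */
            int n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
            printf("tx_per_ev=%d n_xdp_ev=%d\n", tx_per_ev, n_xdp_ev); /* 8, 3 */
            return 0;
    }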
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index e108b0d2bd28..4c9a37dd0d3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -49,9 +49,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct plat_stmmacenet_data *plat;
struct stmmac_resources res;
- bool mdio = false;
- int ret, i;
struct device_node *np;
+ int ret, i, phy_mode;
+ bool mdio = false;
np = dev_of_node(&pdev->dev);
@@ -108,10 +108,11 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (plat->bus_id < 0)
plat->bus_id = pci_dev_id(pdev);
- plat->phy_interface = device_get_phy_mode(&pdev->dev);
- if (plat->phy_interface < 0)
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0)
dev_err(&pdev->dev, "phy_mode not found\n");
+ plat->phy_interface = phy_mode;
plat->interface = PHY_INTERFACE_MODE_GMII;
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e735134e8487..fcdb1d20389b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -349,6 +349,9 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8d9d6ecf8c63..7b8404a21544 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->rx_queues_to_use, false);
stmmac_fpe_handshake(priv, false);
+ stmmac_fpe_stop_wq(priv);
}
priv->speed = SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 072eff8079d0..5ca710844cc1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -397,6 +397,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ int phy_mode;
void *ret;
int rc;
@@ -412,10 +413,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
eth_zero_addr(mac);
}
- plat->phy_interface = device_get_phy_mode(&pdev->dev);
- if (plat->phy_interface < 0)
- return ERR_PTR(plat->phy_interface);
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0)
+ return ERR_PTR(phy_mode);
+ plat->phy_interface = phy_mode;
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
plat->interface = plat->phy_interface;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 4e86cdf2bc9f..580cc035536b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -62,7 +62,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
u32 sec, nsec;
u32 quotient, reminder;
int neg_adj = 0;
- bool xmac;
+ bool xmac, est_rst = false;
+ int ret;
xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
@@ -75,10 +76,48 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
sec = quotient;
nsec = reminder;
+ /* If EST is enabled, disable it before adjusting the PTP time. */
+ if (priv->plat->est && priv->plat->est->enable) {
+ est_rst = true;
+ mutex_lock(&priv->plat->est->lock);
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ }
+
spin_lock_irqsave(&priv->ptp_lock, flags);
stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ /* Calculate the new base time and re-configure EST after the PTP time adjustment. */
+ if (est_rst) {
+ struct timespec64 current_time, time;
+ ktime_t current_time_ns, basetime;
+ u64 cycle_time;
+
+ mutex_lock(&priv->plat->est->lock);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ time.tv_nsec = priv->plat->est->btr_reserve[0];
+ time.tv_sec = priv->plat->est->btr_reserve[1];
+ basetime = timespec64_to_ktime(time);
+ cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+ priv->plat->est->ctr[0];
+ time = stmmac_calc_tas_basetime(basetime,
+ current_time_ns,
+ cycle_time);
+
+ priv->plat->est->btr[0] = (u32)time.tv_nsec;
+ priv->plat->est->btr[1] = (u32)time.tv_sec;
+ priv->plat->est->enable = true;
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ if (ret)
+ netdev_err(priv->dev, "failed to configure EST\n");
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 92dab609d4f8..4f3b6437b114 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -711,12 +711,35 @@ static int tc_setup_cls(struct stmmac_priv *priv,
return ret;
}
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time)
+{
+ struct timespec64 time;
+
+ if (ktime_after(old_base_time, current_time)) {
+ time = ktime_to_timespec64(old_base_time);
+ } else {
+ s64 n;
+ ktime_t base_time;
+
+ n = div64_s64(ktime_sub_ns(current_time, old_base_time),
+ cycle_time);
+ base_time = ktime_add_ns(old_base_time,
+ (n + 1) * cycle_time);
+
+ time = ktime_to_timespec64(base_time);
+ }
+
+ return time;
+}
+
static int tc_setup_taprio(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
- struct timespec64 time, current_time;
+ struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
bool fpe = false;
int i, ret = 0;
@@ -773,14 +796,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
GFP_KERNEL);
if (!plat->est)
return -ENOMEM;
+
+ mutex_init(&priv->plat->est->lock);
} else {
memset(plat->est, 0, sizeof(*plat->est));
}
size = qopt->num_entries;
+ mutex_lock(&priv->plat->est->lock);
priv->plat->est->gcl_size = size;
priv->plat->est->enable = qopt->enable;
+ mutex_unlock(&priv->plat->est->lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@@ -811,32 +838,28 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->gcl[i] = delta_ns | (gates << wid);
}
+ mutex_lock(&priv->plat->est->lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
- if (ktime_after(qopt->base_time, current_time_ns)) {
- time = ktime_to_timespec64(qopt->base_time);
- } else {
- ktime_t base_time;
- s64 n;
-
- n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
- qopt->cycle_time);
- base_time = ktime_add_ns(qopt->base_time,
- (n + 1) * qopt->cycle_time);
-
- time = ktime_to_timespec64(base_time);
- }
+ time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
+ qopt->cycle_time);
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
+ qopt_time = ktime_to_timespec64(qopt->base_time);
+ priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+ priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+
ctr = qopt->cycle_time;
priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
priv->plat->est->ctr[1] = (u32)ctr;
- if (fpe && !priv->dma_cap.fpesel)
+ if (fpe && !priv->dma_cap.fpesel) {
+ mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
+ }
/* Actual FPE register configuration will be done after FPE handshake
* is success.
@@ -845,6 +868,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@@ -860,9 +884,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return 0;
disable:
+ mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
priv->plat->fpe_cfg->enable = false;
stmmac_fpe_configure(priv, priv->ioaddr,
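stmmac_calc_tas_basetime() factors out the rule now shared by taprio setup and the PTP-adjust path: keep a future base time as-is, otherwise roll it forward by whole cycles to the first boundary after the current time. The same arithmetic on plain signed 64-bit nanoseconds, as a standalone check:

    #include <stdio.h>
    #include <stdint.h>

    /* Same rule as stmmac_calc_tas_basetime(), on s64 nanoseconds:
     * if the old base time is still in the future keep it, otherwise
     * advance it by (n + 1) whole cycles so it lands just after 'now'.
     */
    static int64_t next_basetime(int64_t old_base, int64_t now, int64_t cycle)
    {
            if (old_base > now)
                    return old_base;
            int64_t n = (now - old_base) / cycle;
            return old_base + (n + 1) * cycle;
    }

    int main(void)
    {
            /* cycle = 1 ms; base time 2.5 cycles in the past */
            int64_t base = next_basetime(0, 2500000, 1000000);
            printf("%lld\n", (long long)base);   /* prints 3000000 */
            return 0;
    }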
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 0b2ce4bdc2c3..e0cb713193ea 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
#endif
- free_netdev(dev);
-
cancel_work_sync(&priv->tlan_tqueue);
+ free_netdev(dev);
}
static void tlan_start(struct net_device *dev)
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 14f07050b6b1..0de2c4552f5e 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1504,9 +1504,8 @@ err_out_resource:
release_mem_region(start, len);
err_out_kfree:
- free_netdev(dev);
-
pr_err("%s: initialization failure, aborting!\n", fp->name);
+ free_netdev(dev);
return ret;
}
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index 3811f1bde84e..b80ed2ffd45e 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -85,7 +85,7 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
const char aes_gcm_name[] = "rfc4106(gcm(aes))";
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -134,7 +134,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
u16 sa_idx;
int ret;
- dev = xs->xso.dev;
+ dev = xs->xso.real_dev;
ns = netdev_priv(dev);
ipsec = &ns->ipsec;
@@ -194,7 +194,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
static void nsim_ipsec_del_sa(struct xfrm_state *xs)
{
- struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
u16 sa_idx;
@@ -211,7 +211,7 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs)
static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
- struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
ipsec->ok++;
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index bbbc6ac8fa82..53a433442803 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -78,6 +78,11 @@ enum {
/* Temperature read register (88E2110 only) */
MV_PCS_TEMP = 0x8042,
+ /* Number of ports on the device */
+ MV_PCS_PORT_INFO = 0xd00d,
+ MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380,
+ MV_PCS_PORT_INFO_NPORTS_SHIFT = 7,
+
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
* restarted, but status registers appear readable from either.
@@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = {
#endif
};
+static int mv3310_get_number_of_ports(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO);
+ if (ret < 0)
+ return ret;
+
+ ret &= MV_PCS_PORT_INFO_NPORTS_MASK;
+ ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT;
+
+ return ret + 1;
+}
+
+static int mv3310_match_phy_device(struct phy_device *phydev)
+{
+ return mv3310_get_number_of_ports(phydev) == 1;
+}
+
+static int mv3340_match_phy_device(struct phy_device *phydev)
+{
+ return mv3310_get_number_of_ports(phydev) == 4;
+}
+
static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
{
int val;
@@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev)
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
- .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .match_phy_device = mv3310_match_phy_device,
.name = "mv88x3310",
.driver_data = &mv3310_type,
.get_features = mv3310_get_features,
@@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = {
.set_loopback = genphy_c45_loopback,
},
{
- .phy_id = MARVELL_PHY_ID_88X3340,
- .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
+ .phy_id = MARVELL_PHY_ID_88X3310,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .match_phy_device = mv3340_match_phy_device,
.name = "mv88x3340",
.driver_data = &mv3340_type,
.get_features = mv3310_get_features,
@@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
- { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK },
- { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK },
+ { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
};
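The new match helpers tell the single-port 88X3310 apart from the four-port 88X3340 purely by the port-count field in MV_PCS_PORT_INFO, since both now share one phy_id entry. The mask/shift decode is easy to check in isolation; the register values below are made-up samples, not real silicon reads:

    #include <stdio.h>

    #define PORT_INFO_NPORTS_MASK  0x0380
    #define PORT_INFO_NPORTS_SHIFT 7

    /* Decode the port count from a raw MV_PCS_PORT_INFO read:
     * the field stores ports - 1 in bits 9:7.
     */
    static int nports(int reg)
    {
            return ((reg & PORT_INFO_NPORTS_MASK) >> PORT_INFO_NPORTS_SHIFT) + 1;
    }

    int main(void)
    {
            printf("%d\n", nports(0x0000));  /* field 0 -> 1 port  (88X3310) */
            printf("%d\n", nports(0x0180));  /* field 3 -> 4 ports (88X3340) */
            return 0;
    }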
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index aec97b021a73..2c115216420a 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -701,6 +701,7 @@ static int ax88772_init_phy(struct usbnet *dev)
return ret;
}
+ phy_suspend(priv->phydev);
priv->phydev->mac_managed_pm = 1;
phy_attached_info(priv->phydev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8a58a2f013af..56c3f8519093 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1771,6 +1771,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
{
struct scatterlist *sgs[4], hdr, stat;
unsigned out_num = 0, tmp;
+ int ret;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -1790,7 +1791,12 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
- virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_warn(&vi->vdev->dev,
+ "Failed to add sgs for command vq: %d\n.", ret);
+ return false;
+ }
if (unlikely(!virtqueue_kick(vi->cvq)))
return vi->ctrl->status == VIRTIO_NET_OK;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index c0bd9cbc43b1..1b483cf2b1ca 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,6 +26,10 @@
#include "vmxnet3_int.h"
+#include <net/vxlan.h>
+#include <net/geneve.h>
+
+#define VXLAN_UDP_PORT 8472
struct vmxnet3_stat_desc {
char desc[ETH_GSTRING_LEN];
@@ -262,6 +266,8 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
if (VMXNET3_VERSION_GE_4(adapter) &&
skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_proto = 0;
+ u16 port;
+ struct udphdr *udph;
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
@@ -274,8 +280,20 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
- if (l4_proto != IPPROTO_UDP)
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ udph = udp_hdr(skb);
+ port = be16_to_cpu(udph->dest);
+ /* Check if offloaded port is supported */
+ if (port != GENEVE_UDP_PORT &&
+ port != IANA_VXLAN_UDP_PORT &&
+ port != VXLAN_UDP_PORT) {
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+ break;
+ default:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
}
return features;
}
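The vmxnet3 features_check path now keeps checksum/GSO offloads for encapsulated UDP only when the outer destination port is one the device can parse: Geneve (6081), the IANA VXLAN port (4789), or the legacy VXLAN port 8472 covered by the new VXLAN_UDP_PORT macro. The port gate in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    /* Port gate used by the new vmxnet3 features_check path: 6081 and
     * 4789 are the well-known Geneve/VXLAN ports; 8472 is the legacy
     * (pre-IANA) VXLAN port the device also offloads.
     */
    static bool tunnel_port_offloadable(unsigned short dport)
    {
            return dport == 6081 || dport == 4789 || dport == 8472;
    }

    int main(void)
    {
            printf("%d\n", tunnel_port_offloadable(4789)); /* 1: keep offloads  */
            printf("%d\n", tunnel_port_offloadable(4790)); /* 0: strip offloads */
            return 0;
    }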
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 349ca18088e8..c54fdae950fb 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -364,19 +364,19 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_cisco_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_cisco_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_cisco_init);
+module_exit(hdlc_cisco_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 72250fe0a1df..25e3564ce118 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1279,19 +1279,19 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_fr_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_fr_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_fr_init);
+module_exit(hdlc_fr_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 834be2ae3e9e..b81ecf432a0c 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -705,20 +705,20 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_ppp_init(void)
{
skb_queue_head_init(&tx_queue);
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_ppp_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_ppp_init);
+module_exit(hdlc_ppp_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 388fcc09b4dd..54d28496fefd 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -90,7 +90,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
}
-static int __init mod_init(void)
+static int __init hdlc_raw_init(void)
{
register_hdlc_protocol(&proto);
return 0;
@@ -98,14 +98,14 @@ static int __init mod_init(void)
-static void __exit mod_exit(void)
+static void __exit hdlc_raw_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_raw_init);
+module_exit(hdlc_raw_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index c70a518b8b47..927596276a07 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -110,7 +110,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
}
-static int __init mod_init(void)
+static int __init hdlc_eth_init(void)
{
register_hdlc_protocol(&proto);
return 0;
@@ -118,14 +118,14 @@ static int __init mod_init(void)
-static void __exit mod_exit(void)
+static void __exit hdlc_eth_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_eth_init);
+module_exit(hdlc_eth_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index d2bf72bf3bd7..9b7ebf8bd85c 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -365,19 +365,19 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_x25_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_x25_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_x25_init);
+module_exit(hdlc_x25_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 7fd21049ff5a..63ec140c9c37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -389,6 +389,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_WEP104:
if (!mvif->wep_sta)
return -EOPNOTSUPP;
+ break;
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index c2c4dc196802..cd690c64f65b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -931,7 +931,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
if (ret) {
dev_dbg(dev->mt76.dev, "Firmware is already download\n");
- return -EIO;
+ goto fw_loaded;
}
ret = mt7921_load_patch(dev);
@@ -949,6 +949,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
return -EIO;
}
+fw_loaded:
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
#ifdef CONFIG_PM
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 46f76e8aae92..0a472ce77370 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -24,15 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
return -EIO;
}
- /* check for the interafce id
- * if if_id 1 to 8 then create IP MUX channel sessions.
- * To start MUX session from 0 as network interface id would start
- * from 1 so map it to if_id = if_id - 1
- */
- if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
- return ipc_mux_open_session(ipc_imem->mux, if_id - 1);
-
- return -EINVAL;
+ return ipc_mux_open_session(ipc_imem->mux, if_id);
}
/* Release a net link to CP. */
@@ -41,7 +33,7 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
{
if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
if_id <= IP_MUX_SESSION_END)
- ipc_mux_close_session(ipc_imem->mux, if_id - 1);
+ ipc_mux_close_session(ipc_imem->mux, if_id);
}
/* Tasklet call to do uplink transfer. */
@@ -83,13 +75,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
goto out;
}
- if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
- /* Route the UL packet through IP MUX Layer */
- ret = ipc_mux_ul_trigger_encode(ipc_imem->mux,
- if_id - 1, skb);
- else
- dev_err(ipc_imem->dev,
- "invalid if_id %d: ", if_id);
+ /* Route the UL packet through IP MUX Layer */
+ ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
out:
return ret;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
index fd356dafbdd6..2007fe23e9a5 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -27,11 +27,11 @@
#define BOOT_CHECK_DEFAULT_TIMEOUT 400
/* IP MUX channel range */
-#define IP_MUX_SESSION_START 1
-#define IP_MUX_SESSION_END 8
+#define IP_MUX_SESSION_START 0
+#define IP_MUX_SESSION_END 7
/* Default IP MUX channel */
-#define IP_MUX_SESSION_DEFAULT 1
+#define IP_MUX_SESSION_DEFAULT 0
/**
* ipc_imem_sys_port_open - Open a port link to CP.
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
index e634ffc6ec08..562de275797a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -288,7 +288,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
/* Pass the packet to the netif layer. */
dest_skb->priority = service_class;
- return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1);
+ return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}
/* Decode Flow Credit Table in the block */
diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
index 2229d752926c..d12188ffed7e 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_uevent.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
@@ -37,7 +37,7 @@ void ipc_uevent_send(struct device *dev, char *uevent)
/* Store the device and event information */
info->dev = dev;
- snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent);
+ snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent);
/* Schedule uevent in process context using work queue */
schedule_work(&info->work);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index c999c64001f4..b2357ad5d517 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -107,6 +107,7 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb,
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ unsigned int len = skb->len;
int if_id = priv->if_id;
int ret;
@@ -123,6 +124,8 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb,
/* Return code of zero is success */
if (ret == 0) {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += len;
ret = NETDEV_TX_OK;
} else if (ret == -EBUSY) {
ret = NETDEV_TX_BUSY;
@@ -140,7 +143,8 @@ exit:
ret);
dev_kfree_skb_any(skb);
- return ret;
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
/* Ops structure for wwan net link */
@@ -158,6 +162,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev)
iosm_dev->priv_flags |= IFF_NO_QUEUE;
iosm_dev->type = ARPHRD_NONE;
+ iosm_dev->mtu = ETH_DATA_LEN;
iosm_dev->min_mtu = ETH_MIN_MTU;
iosm_dev->max_mtu = ETH_MAX_MTU;
@@ -252,8 +257,8 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
skb->pkt_type = PACKET_HOST;
- if (if_id < (IP_MUX_SESSION_START - 1) ||
- if_id > (IP_MUX_SESSION_END - 1)) {
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id > IP_MUX_SESSION_END) {
ret = -EINVAL;
goto free;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d3c5086673bc..320051f5a3dd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1554,6 +1554,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
wmb(); /* ensure the first interrupt sees the initialization */
}
+/*
+ * Try getting shutdown_lock while setting up IO queues.
+ */
+static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
+{
+ /*
+ * Give up if the lock is being held by nvme_dev_disable.
+ */
+ if (!mutex_trylock(&dev->shutdown_lock))
+ return -ENODEV;
+
+ /*
+ * Controller is in the wrong state; fail early.
+ */
+ if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
+ mutex_unlock(&dev->shutdown_lock);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
struct nvme_dev *dev = nvmeq->dev;
@@ -1582,8 +1604,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
goto release_cq;
nvmeq->cq_vector = vector;
- nvme_init_queue(nvmeq, qid);
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
+ nvme_init_queue(nvmeq, qid);
if (!polled) {
result = queue_request_irq(nvmeq);
if (result < 0)
@@ -1591,10 +1616,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
}
set_bit(NVMEQ_ENABLED, &nvmeq->flags);
+ mutex_unlock(&dev->shutdown_lock);
return result;
release_sq:
dev->online_queues--;
+ mutex_unlock(&dev->shutdown_lock);
adapter_delete_sq(dev, qid);
release_cq:
adapter_delete_cq(dev, qid);
@@ -2167,7 +2194,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- clear_bit(NVMEQ_ENABLED, &adminq->flags);
+ /*
+ * Free IRQ resources as soon as the NVMEQ_ENABLED bit transitions
+ * from set to unset. If there is a window between the bit being
+ * cleared and the vector actually being freed, pci_free_irq_vectors()
+ * jumping into that window will crash. Also take the lock to avoid
+ * racing with pci_free_irq_vectors() in the nvme_dev_disable() path.
+ */
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
+ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+ pci_free_irq(pdev, 0, adminq);
if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -2183,14 +2221,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
result = nvme_remap_bar(dev, size);
if (!result)
break;
- if (!--nr_io_queues)
- return -ENOMEM;
+ if (!--nr_io_queues) {
+ result = -ENOMEM;
+ goto out_unlock;
+ }
} while (1);
adminq->q_db = dev->dbs;
retry:
/* Deregister the admin queue's interrupt */
- pci_free_irq(pdev, 0, adminq);
+ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+ pci_free_irq(pdev, 0, adminq);
/*
* If we enable msix early due to not intx, disable it again before
@@ -2199,8 +2240,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
pci_free_irq_vectors(pdev);
result = nvme_setup_irqs(dev, nr_io_queues);
- if (result <= 0)
- return -EIO;
+ if (result <= 0) {
+ result = -EIO;
+ goto out_unlock;
+ }
dev->num_vecs = result;
result = max(result - 1, 1);
@@ -2214,8 +2257,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
result = queue_request_irq(adminq);
if (result)
- return result;
+ goto out_unlock;
set_bit(NVMEQ_ENABLED, &adminq->flags);
+ mutex_unlock(&dev->shutdown_lock);
result = nvme_create_io_queues(dev);
if (result || dev->online_queues < 2)
@@ -2224,6 +2268,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (dev->online_queues - 1 < dev->max_qid) {
nr_io_queues = dev->online_queues - 1;
nvme_disable_io_queues(dev);
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
nvme_suspend_io_queues(dev);
goto retry;
}
@@ -2232,6 +2279,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->io_queues[HCTX_TYPE_READ],
dev->io_queues[HCTX_TYPE_POLL]);
return 0;
+out_unlock:
+ mutex_unlock(&dev->shutdown_lock);
+ return result;
}
static void nvme_del_queue_end(struct request *req, blk_status_t error)
@@ -2962,7 +3012,6 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
nvme_dev_disable(dev, true);
- nvme_dev_remove_admin(dev);
}
flush_work(&dev->ctrl.reset_work);
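nvme_setup_io_queues_trylock() encodes a pattern worth noting: a setup path that shares resources with a teardown path must not block on the teardown's lock, so it tries once and fails the whole operation with -ENODEV instead. A user-space model of that guard, with an int standing in for dev->ctrl.state:

    #include <pthread.h>
    #include <errno.h>
    #include <stdio.h>

    /* Setup may only proceed in the CONNECTING state; if teardown
     * (the nvme_dev_disable analogue) owns the lock, give up rather
     * than deadlock against it.
     */
    enum { CONNECTING, DELETING };

    static pthread_mutex_t shutdown_lock = PTHREAD_MUTEX_INITIALIZER;
    static int state = CONNECTING;

    static int setup_trylock(void)
    {
            if (pthread_mutex_trylock(&shutdown_lock) != 0)
                    return -ENODEV;      /* teardown owns the lock: give up */
            if (state != CONNECTING) {
                    pthread_mutex_unlock(&shutdown_lock);
                    return -ENODEV;      /* raced with a state change       */
            }
            return 0;                    /* caller now holds the lock       */
    }

    int main(void)
    {
            if (setup_trylock() == 0) {
                    puts("setup may touch IRQ resources");
                    pthread_mutex_unlock(&shutdown_lock);
            }
            return 0;
    }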
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 12acfe05cd68..8cb15ee5b249 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -123,7 +123,6 @@ struct nvme_tcp_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct sockaddr_storage addr;
struct sockaddr_storage src_addr;
- struct net_device *ndev;
struct nvme_ctrl ctrl;
struct work_struct err_work;
@@ -2533,8 +2532,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
}
if (opts->mask & NVMF_OPT_HOST_IFACE) {
- ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface);
- if (!ctrl->ndev) {
+ if (!__dev_get_by_name(&init_net, opts->host_iface)) {
pr_err("invalid interface passed: %s\n",
opts->host_iface);
ret = -ENODEV;
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 9bab07302bbf..d32fbfc93ea9 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -230,8 +230,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
break;
}
/* If arch decided it can't, fall through... */
-#endif /* HAVE_PCI_MMAP */
fallthrough;
+#endif /* HAVE_PCI_MMAP */
default:
ret = -EINVAL;
break;
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 3d45ed0157c6..a6ebdb269fdd 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -1728,6 +1728,7 @@ static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di)
break;
case AB8500_FG_CALIB_WAIT:
dev_dbg(di->dev, "Calibration WFI\n");
+ break;
default:
break;
}
@@ -2224,6 +2225,7 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
queue_work(di->fg_wq, &di->fg_work);
break;
}
+ break;
default:
break;
}
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index a17849bfacbf..b72826cf6794 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -1150,6 +1150,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
default:
break;
}
+ break;
default:
break;
}
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 8673d1743faa..28a6fe342d3e 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -3,7 +3,7 @@
# Makefile for PTP 1588 clock support.
#
-ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
+ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o ptp_vclock.o
ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o
ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index a23a37a4d5dc..4dfc52e06704 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -24,10 +24,11 @@
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
+struct class *ptp_class;
+
/* private globals */
static dev_t ptp_devt;
-static struct class *ptp_class;
static DEFINE_IDA(ptp_clocks_map);
@@ -76,6 +77,11 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ if (ptp_vclock_in_use(ptp)) {
+ pr_err("ptp: virtual clock in use\n");
+ return -EBUSY;
+ }
+
return ptp->info->settime64(ptp->info, tp);
}
@@ -97,6 +103,11 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
struct ptp_clock_info *ops;
int err = -EOPNOTSUPP;
+ if (ptp_vclock_in_use(ptp)) {
+ pr_err("ptp: virtual clock in use\n");
+ return -EBUSY;
+ }
+
ops = ptp->info;
if (tx->modes & ADJ_SETOFFSET) {
@@ -161,6 +172,7 @@ static void ptp_clock_release(struct device *dev)
ptp_cleanup_pin_groups(ptp);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
+ mutex_destroy(&ptp->n_vclocks_mux);
ida_simple_remove(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -185,6 +197,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
{
struct ptp_clock *ptp;
int err = 0, index, major = MAJOR(ptp_devt);
+ size_t size;
if (info->n_alarm > PTP_MAX_ALARMS)
return ERR_PTR(-EINVAL);
@@ -208,6 +221,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
spin_lock_init(&ptp->tsevq.lock);
mutex_init(&ptp->tsevq_mux);
mutex_init(&ptp->pincfg_mux);
+ mutex_init(&ptp->n_vclocks_mux);
init_waitqueue_head(&ptp->tsev_wq);
if (ptp->info->do_aux_work) {
@@ -218,7 +232,22 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
pr_err("failed to create ptp aux_worker %d\n", err);
goto kworker_err;
}
- ptp->pps_source->lookup_cookie = ptp;
+ }
+
+ /* PTP virtual clock is being registered under physical clock */
+ if (parent && parent->class && parent->class->name &&
+ strcmp(parent->class->name, "ptp") == 0)
+ ptp->is_virtual_clock = true;
+
+ if (!ptp->is_virtual_clock) {
+ ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;
+
+ size = sizeof(int) * ptp->max_vclocks;
+ ptp->vclock_index = kzalloc(size, GFP_KERNEL);
+ if (!ptp->vclock_index) {
+ err = -ENOMEM;
+ goto no_mem_for_vclocks;
+ }
}
err = ptp_populate_pin_groups(ptp);
@@ -238,6 +267,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
pr_err("failed to register pps source\n");
goto no_pps;
}
+ ptp->pps_source->lookup_cookie = ptp;
}
/* Initialize a new device of our class in our clock structure. */
@@ -265,11 +295,14 @@ no_clock:
no_pps:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
+ kfree(ptp->vclock_index);
+no_mem_for_vclocks:
if (ptp->kworker)
kthread_destroy_worker(ptp->kworker);
kworker_err:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
+ mutex_destroy(&ptp->n_vclocks_mux);
ida_simple_remove(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
@@ -280,9 +313,16 @@ EXPORT_SYMBOL(ptp_clock_register);
int ptp_clock_unregister(struct ptp_clock *ptp)
{
+ if (ptp_vclock_in_use(ptp)) {
+ pr_err("ptp: virtual clock in use\n");
+ return -EBUSY;
+ }
+
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
+ kfree(ptp->vclock_index);
+
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 6b97155148f1..dba6be477067 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -18,6 +18,7 @@
#define PTP_MAX_TIMESTAMPS 128
#define PTP_BUF_TIMESTAMPS 30
+#define PTP_DEFAULT_MAX_VCLOCKS 20
struct timestamp_event_queue {
struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
@@ -46,6 +47,24 @@ struct ptp_clock {
const struct attribute_group *pin_attr_groups[2];
struct kthread_worker *kworker;
struct kthread_delayed_work aux_work;
+ unsigned int max_vclocks;
+ unsigned int n_vclocks;
+ int *vclock_index;
+ struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */
+ bool is_virtual_clock;
+};
+
+#define info_to_vclock(d) container_of((d), struct ptp_vclock, info)
+#define cc_to_vclock(d) container_of((d), struct ptp_vclock, cc)
+#define dw_to_vclock(d) container_of((d), struct ptp_vclock, refresh_work)
+
+struct ptp_vclock {
+ struct ptp_clock *pclock;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ spinlock_t lock; /* protects tc/cc */
};
/*
@@ -61,6 +80,24 @@ static inline int queue_cnt(struct timestamp_event_queue *q)
return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
}
+/* Check if ptp virtual clock is in use */
+static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
+{
+ bool in_use = false;
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+ return true;
+
+ if (!ptp->is_virtual_clock && ptp->n_vclocks)
+ in_use = true;
+
+ mutex_unlock(&ptp->n_vclocks_mux);
+
+ return in_use;
+}
+
+extern struct class *ptp_class;
+
/*
* see ptp_chardev.c
*/
@@ -89,4 +126,6 @@ extern const struct attribute_group *ptp_groups[];
int ptp_populate_pin_groups(struct ptp_clock *ptp);
void ptp_cleanup_pin_groups(struct ptp_clock *ptp);
+struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock);
+void ptp_vclock_unregister(struct ptp_vclock *vclock);
#endif
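One subtlety in ptp_vclock_in_use() above: when mutex_lock_interruptible() fails because the task caught a signal, the helper reports the clock as busy rather than risk racing with vclock creation, so settime/adjtime/unregister back off. A userspace sketch of that err-on-the-busy-side shape, with pthread_mutex_trylock() standing in for the interruptible lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool vclock_in_use(pthread_mutex_t *mux, unsigned int n_vclocks,
			  bool is_virtual)
{
	bool in_use = false;

	if (pthread_mutex_trylock(mux))
		return true;	/* cannot tell: report busy to stay safe */

	if (!is_virtual && n_vclocks)
		in_use = true;

	pthread_mutex_unlock(mux);
	return in_use;
}

int main(void)
{
	pthread_mutex_t mux = PTHREAD_MUTEX_INITIALIZER;

	printf("%d %d\n", vclock_in_use(&mux, 0, false),
	       vclock_in_use(&mux, 2, false));
	return 0;
}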
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index be076a91e20e..b3d96b747292 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -3,6 +3,7 @@
* PTP 1588 clock support - sysfs interface.
*
* Copyright (C) 2010 OMICRON electronics GmbH
+ * Copyright 2021 NXP
*/
#include <linux/capability.h>
#include <linux/slab.h>
@@ -148,6 +149,159 @@ out:
}
static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
+static int unregister_vclock(struct device *dev, void *data)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct ptp_clock_info *info = ptp->info;
+ struct ptp_vclock *vclock;
+ u8 *num = data;
+
+ vclock = info_to_vclock(info);
+ dev_info(dev->parent, "delete virtual clock ptp%d\n",
+ vclock->clock->index);
+
+ ptp_vclock_unregister(vclock);
+ (*num)--;
+
+ /* Stop the device_for_each_child_reverse() iteration; not an error. */
+ if (*num == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t n_vclocks_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ ssize_t size;
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+ return -ERESTARTSYS;
+
+ size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);
+
+ mutex_unlock(&ptp->n_vclocks_mux);
+
+ return size;
+}
+
+static ssize_t n_vclocks_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct ptp_vclock *vclock;
+ int err = -EINVAL;
+ u32 num, i;
+
+ if (kstrtou32(buf, 0, &num))
+ return err;
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+ return -ERESTARTSYS;
+
+ if (num > ptp->max_vclocks) {
+ dev_err(dev, "max value is %u\n", ptp->max_vclocks);
+ goto out;
+ }
+
+ /* Need to create more vclocks */
+ if (num > ptp->n_vclocks) {
+ for (i = 0; i < num - ptp->n_vclocks; i++) {
+ vclock = ptp_vclock_register(ptp);
+ if (!vclock)
+ goto out;
+
+ *(ptp->vclock_index + ptp->n_vclocks + i) =
+ vclock->clock->index;
+
+ dev_info(dev, "new virtual clock ptp%d\n",
+ vclock->clock->index);
+ }
+ }
+
+ /* Need to delete vclocks */
+ if (num < ptp->n_vclocks) {
+ i = ptp->n_vclocks - num;
+ device_for_each_child_reverse(dev, &i,
+ unregister_vclock);
+
+ for (i = 1; i <= ptp->n_vclocks - num; i++)
+ *(ptp->vclock_index + ptp->n_vclocks - i) = -1;
+ }
+
+ if (num == 0)
+ dev_info(dev, "only physical clock in use now\n");
+ else
+ dev_info(dev, "guarantee physical clock free running\n");
+
+ ptp->n_vclocks = num;
+ mutex_unlock(&ptp->n_vclocks_mux);
+
+ return count;
+out:
+ mutex_unlock(&ptp->n_vclocks_mux);
+ return err;
+}
+static DEVICE_ATTR_RW(n_vclocks);
+
+static ssize_t max_vclocks_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ ssize_t size;
+
+ size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks);
+
+ return size;
+}
+
+static ssize_t max_vclocks_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ unsigned int *vclock_index;
+ int err = -EINVAL;
+ size_t size;
+ u32 max;
+
+ if (kstrtou32(buf, 0, &max) || max == 0)
+ return -EINVAL;
+
+ if (max == ptp->max_vclocks)
+ return count;
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+ return -ERESTARTSYS;
+
+ if (max < ptp->n_vclocks)
+ goto out;
+
+ size = sizeof(int) * max;
+ vclock_index = kzalloc(size, GFP_KERNEL);
+ if (!vclock_index) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ size = sizeof(int) * ptp->n_vclocks;
+ memcpy(vclock_index, ptp->vclock_index, size);
+
+ kfree(ptp->vclock_index);
+ ptp->vclock_index = vclock_index;
+ ptp->max_vclocks = max;
+
+ mutex_unlock(&ptp->n_vclocks_mux);
+
+ return count;
+out:
+ mutex_unlock(&ptp->n_vclocks_mux);
+ return err;
+}
+static DEVICE_ATTR_RW(max_vclocks);
+
static struct attribute *ptp_attrs[] = {
&dev_attr_clock_name.attr,
@@ -162,6 +316,8 @@ static struct attribute *ptp_attrs[] = {
&dev_attr_fifo.attr,
&dev_attr_period.attr,
&dev_attr_pps_enable.attr,
+ &dev_attr_n_vclocks.attr,
+ &dev_attr_max_vclocks.attr,
NULL
};
@@ -183,6 +339,10 @@ static umode_t ptp_is_attribute_visible(struct kobject *kobj,
} else if (attr == &dev_attr_pps_enable.attr) {
if (!info->pps)
mode = 0;
+ } else if (attr == &dev_attr_n_vclocks.attr ||
+ attr == &dev_attr_max_vclocks.attr) {
+ if (ptp->is_virtual_clock)
+ mode = 0;
}
return mode;
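Writing a count to n_vclocks either registers virtual clocks on top of the physical one or unregisters them from the tail of vclock_index, rejecting anything above max_vclocks; unregister_vclock() returns -EINVAL purely to stop device_for_each_child_reverse() once enough children have been deleted. Raising max_vclocks reallocates the index array copy-then-swap under the same mutex. A minimal userspace sketch of that grow path (names illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int grow_vclock_index(int **index, unsigned int n_live,
			     unsigned int new_max)
{
	int *tmp = calloc(new_max, sizeof(*tmp));

	if (!tmp)
		return -1;
	memcpy(tmp, *index, n_live * sizeof(*tmp));	/* keep live entries */
	free(*index);
	*index = tmp;
	return 0;
}

int main(void)
{
	int *idx = calloc(20, sizeof(*idx));	/* PTP_DEFAULT_MAX_VCLOCKS */

	if (!idx)
		return 1;
	idx[0] = 1;	/* pretend ptp1 is a registered vclock */
	if (grow_vclock_index(&idx, 1, 40) == 0)
		printf("grown, idx[0]=%d\n", idx[0]);
	free(idx);
	return 0;
}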
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
new file mode 100644
index 000000000000..e0f87c57749a
--- /dev/null
+++ b/drivers/ptp/ptp_vclock.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PTP virtual clock driver
+ *
+ * Copyright 2021 NXP
+ */
+#include <linux/slab.h>
+#include "ptp_private.h"
+
+#define PTP_VCLOCK_CC_SHIFT 31
+#define PTP_VCLOCK_CC_MULT (1 << PTP_VCLOCK_CC_SHIFT)
+#define PTP_VCLOCK_FADJ_SHIFT 9
+#define PTP_VCLOCK_FADJ_DENOMINATOR 15625ULL
+#define PTP_VCLOCK_REFRESH_INTERVAL (HZ * 2)
+
+static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ unsigned long flags;
+ s64 adj;
+
+ adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
+ adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ timecounter_read(&vclock->tc);
+ vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ return 0;
+}
+
+static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ timecounter_adjtime(&vclock->tc, delta);
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ return 0;
+}
+
+static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ ns = timecounter_read(&vclock->tc);
+ spin_unlock_irqrestore(&vclock->lock, flags);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int ptp_vclock_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ timecounter_init(&vclock->tc, &vclock->cc, ns);
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ return 0;
+}
+
+static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ struct timespec64 ts;
+
+ ptp_vclock_gettime(&vclock->info, &ts);
+
+ return PTP_VCLOCK_REFRESH_INTERVAL;
+}
+
+static const struct ptp_clock_info ptp_vclock_info = {
+ .owner = THIS_MODULE,
+ .name = "ptp virtual clock",
+ /* The maximum ppb value that long scaled_ppm can support */
+ .max_adj = 32767999,
+ .adjfine = ptp_vclock_adjfine,
+ .adjtime = ptp_vclock_adjtime,
+ .gettime64 = ptp_vclock_gettime,
+ .settime64 = ptp_vclock_settime,
+ .do_aux_work = ptp_vclock_refresh,
+};
+
+static u64 ptp_vclock_read(const struct cyclecounter *cc)
+{
+ struct ptp_vclock *vclock = cc_to_vclock(cc);
+ struct ptp_clock *ptp = vclock->pclock;
+ struct timespec64 ts = {};
+
+ if (ptp->info->gettimex64)
+ ptp->info->gettimex64(ptp->info, &ts, NULL);
+ else
+ ptp->info->gettime64(ptp->info, &ts);
+
+ return timespec64_to_ns(&ts);
+}
+
+static const struct cyclecounter ptp_vclock_cc = {
+ .read = ptp_vclock_read,
+ .mask = CYCLECOUNTER_MASK(32),
+ .mult = PTP_VCLOCK_CC_MULT,
+ .shift = PTP_VCLOCK_CC_SHIFT,
+};
+
+struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
+{
+ struct ptp_vclock *vclock;
+
+ vclock = kzalloc(sizeof(*vclock), GFP_KERNEL);
+ if (!vclock)
+ return NULL;
+
+ vclock->pclock = pclock;
+ vclock->info = ptp_vclock_info;
+ vclock->cc = ptp_vclock_cc;
+
+ snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt",
+ pclock->index);
+
+ spin_lock_init(&vclock->lock);
+
+ vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
+ if (IS_ERR_OR_NULL(vclock->clock)) {
+ kfree(vclock);
+ return NULL;
+ }
+
+ timecounter_init(&vclock->tc, &vclock->cc, 0);
+ ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
+
+ return vclock;
+}
+
+void ptp_vclock_unregister(struct ptp_vclock *vclock)
+{
+ ptp_clock_unregister(vclock->clock);
+ kfree(vclock);
+}
+
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{
+ char name[PTP_CLOCK_NAME_LEN] = "";
+ struct ptp_clock *ptp;
+ struct device *dev;
+ int num = 0;
+
+ if (pclock_index < 0)
+ return num;
+
+ snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
+ dev = class_find_device_by_name(ptp_class, name);
+ if (!dev)
+ return num;
+
+ ptp = dev_get_drvdata(dev);
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) {
+ put_device(dev);
+ return num;
+ }
+
+ *vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL);
+ if (!(*vclock_index))
+ goto out;
+
+ memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks);
+ num = ptp->n_vclocks;
+out:
+ mutex_unlock(&ptp->n_vclocks_mux);
+ put_device(dev);
+ return num;
+}
+EXPORT_SYMBOL(ptp_get_vclocks_index);
+
+void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
+ int vclock_index)
+{
+ char name[PTP_CLOCK_NAME_LEN] = "";
+ struct ptp_vclock *vclock;
+ struct ptp_clock *ptp;
+ unsigned long flags;
+ struct device *dev;
+ u64 ns;
+
+ snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index);
+ dev = class_find_device_by_name(ptp_class, name);
+ if (!dev)
+ return;
+
+ ptp = dev_get_drvdata(dev);
+ if (!ptp->is_virtual_clock) {
+ put_device(dev);
+ return;
+ }
+
+ vclock = info_to_vclock(ptp->info);
+
+ ns = ktime_to_ns(hwtstamps->hwtstamp);
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ ns = timecounter_cyc2time(&vclock->tc, ns);
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ put_device(dev);
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+EXPORT_SYMBOL(ptp_convert_timestamp);
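The arithmetic in ptp_vclock_adjfine() above is worth unpacking. The vclock reads its parent in nanoseconds and scales them through a timecounter with shift 31 and nominal mult 2^31, i.e. exactly 1:1. adjfine() receives the frequency offset as scaled_ppm (ppm x 2^16), so the change needed to mult is

    adj = 2^31 * scaled_ppm / (2^16 * 10^6)
        = scaled_ppm * 2^15 / 10^6
        = (scaled_ppm << 9) / 15625        (since 10^6 = 2^6 * 15625)

which is precisely the PTP_VCLOCK_FADJ_SHIFT / PTP_VCLOCK_FADJ_DENOMINATOR pair. The advertised max_adj of 32767999 ppb follows from the largest scaled_ppm a 32-bit long can hold: (2^31 - 1) / 2^16 is roughly 32768 ppm. The 2-second refresh work, meanwhile, keeps successive timecounter deltas well inside the 32-bit nanosecond mask (2^32 ns is about 4.3 s). A tiny standalone check that the reduced and unreduced forms agree:

#include <inttypes.h>
#include <stdio.h>

#define FADJ_SHIFT 9
#define FADJ_DENOM 15625LL

int main(void)
{
	long scaled_ppm = 65536;	/* exactly +1 ppm */
	int64_t adj = ((int64_t)scaled_ppm << FADJ_SHIFT) / FADJ_DENOM;
	/* unreduced form: nominal mult 2^31 scaled by ppm / 10^6 */
	int64_t ref = (((int64_t)1 << 31) * scaled_ppm) / 65536 / 1000000;

	printf("adj=%" PRId64 " ref=%" PRId64 "\n", adj, ref);	/* both 2147 */
	return 0;
}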
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 5537b5f6dd5d..e157273fd2f7 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -190,12 +190,9 @@ static int berlin_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- if (state->period != pwm->state.period ||
- state->duty_cycle != pwm->state.duty_cycle) {
- err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period);
- if (err)
- return err;
- }
+ err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
if (!enabled)
return berlin_pwm_enable(chip, pwm);
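This hunk, like the matching ep93xx, spear, sprd, and tiecap hunks below, drops the "only reconfigure when period or duty changed" shortcut: ->apply() is now expected to program the requested state unconditionally rather than trust the cached pwm->state to mirror the hardware. A reduced skeleton of the resulting flow; the demo_pwm_* helpers are hypothetical stand-ins for the per-driver ones:

/* Sketch only: demo_pwm_{config,enable,disable} are placeholders, not real API. */
static int demo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			  const struct pwm_state *state)
{
	int err;

	if (!state->enabled) {
		if (pwm->state.enabled)
			demo_pwm_disable(chip, pwm);
		return 0;
	}

	/* always program duty/period; no delta check against cached state */
	err = demo_pwm_config(chip, pwm, state->duty_cycle, state->period);
	if (err)
		return err;

	if (!pwm->state.enabled)
		return demo_pwm_enable(chip, pwm);

	return 0;
}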
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 8a3d781e6514..fc3cb7d669c6 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -64,6 +64,11 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret;
struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
bool enabled = state->enabled;
+ void __iomem *base = ep93xx_pwm->base;
+ unsigned long long c;
+ unsigned long period_cycles;
+ unsigned long duty_cycles;
+ unsigned long term;
if (state->polarity != pwm->state.polarity) {
if (enabled) {
@@ -97,57 +102,47 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- if (state->period != pwm->state.period ||
- state->duty_cycle != pwm->state.duty_cycle) {
- struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
- void __iomem *base = ep93xx_pwm->base;
- unsigned long long c;
- unsigned long period_cycles;
- unsigned long duty_cycles;
- unsigned long term;
+ /*
+ * The clock needs to be enabled to access the PWM registers.
+ * Configuration can be changed at any time.
+ */
+ if (!pwm_is_enabled(pwm)) {
+ ret = clk_prepare_enable(ep93xx_pwm->clk);
+ if (ret)
+ return ret;
+ }
- /*
- * The clock needs to be enabled to access the PWM registers.
- * Configuration can be changed at any time.
- */
- if (!pwm_is_enabled(pwm)) {
- ret = clk_prepare_enable(ep93xx_pwm->clk);
- if (ret)
- return ret;
- }
+ c = clk_get_rate(ep93xx_pwm->clk);
+ c *= state->period;
+ do_div(c, 1000000000);
+ period_cycles = c;
+
+ c = period_cycles;
+ c *= state->duty_cycle;
+ do_div(c, state->period);
+ duty_cycles = c;
- c = clk_get_rate(ep93xx_pwm->clk);
- c *= state->period;
- do_div(c, 1000000000);
- period_cycles = c;
-
- c = period_cycles;
- c *= state->duty_cycle;
- do_div(c, state->period);
- duty_cycles = c;
-
- if (period_cycles < 0x10000 && duty_cycles < 0x10000) {
- term = readw(base + EP93XX_PWMx_TERM_COUNT);
-
- /* Order is important if PWM is running */
- if (period_cycles > term) {
- writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
- writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
- } else {
- writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
- writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
- }
- ret = 0;
+ if (period_cycles < 0x10000 && duty_cycles < 0x10000) {
+ term = readw(base + EP93XX_PWMx_TERM_COUNT);
+
+ /* Order is important if PWM is running */
+ if (period_cycles > term) {
+ writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
+ writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
} else {
- ret = -EINVAL;
+ writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
+ writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
}
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
- if (!pwm_is_enabled(pwm))
- clk_disable_unprepare(ep93xx_pwm->clk);
+ if (!pwm_is_enabled(pwm))
+ clk_disable_unprepare(ep93xx_pwm->clk);
- if (ret)
- return ret;
- }
+ if (ret)
+ return ret;
if (!enabled) {
ret = clk_prepare_enable(ep93xx_pwm->clk);
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 48c31dac2f32..54c7990967dd 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -177,12 +177,9 @@ static int spear_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- if (state->period != pwm->state.period ||
- state->duty_cycle != pwm->state.duty_cycle) {
- err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period);
- if (err)
- return err;
- }
+ err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
if (!pwm->state.enabled)
return spear_pwm_enable(chip, pwm);
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index f2a85e8dd941..7004f55bbf11 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -183,13 +183,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
}
- if (state->period != cstate->period ||
- state->duty_cycle != cstate->duty_cycle) {
- ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
- state->period);
- if (ret)
- return ret;
- }
+ ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
+ state->period);
+ if (ret)
+ return ret;
sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1);
} else if (cstate->enabled) {
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index dec3f1fb150c..35eb19a5a0d1 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -189,16 +189,13 @@ static int ecap_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- if (state->period != pwm->state.period ||
- state->duty_cycle != pwm->state.duty_cycle) {
- if (state->period > NSEC_PER_SEC)
- return -ERANGE;
+ if (state->period > NSEC_PER_SEC)
+ return -ERANGE;
- err = ecap_pwm_config(chip, pwm, state->duty_cycle,
- state->period, enabled);
- if (err)
- return err;
- }
+ err = ecap_pwm_config(chip, pwm, state->duty_cycle,
+ state->period, enabled);
+ if (err)
+ return err;
if (!enabled)
return ecap_pwm_enable(chip, pwm);
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 8abb42923307..cc8237afeffa 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -371,8 +371,6 @@ __tapechar_ioctl(struct tape_device *device,
case MTSEEK:
if (device->required_tapemarks)
tape_std_terminate_write(device);
- default:
- ;
}
rc = tape_mtop(device, op.mt_op, op.mt_count);
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index b341075397d9..377e3689d1d4 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1454,6 +1454,7 @@ again:
get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc, "normal RX");
+ break;
default:
break;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d308ff744a29..f0d6f205c53c 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -434,6 +434,7 @@ static int qeth_l3_correct_routing_type(struct qeth_card *card,
if (qeth_is_ipafunc_supported(card, prot,
IPA_OSA_MC_ROUTER))
return 0;
+ goto out_inval;
default:
goto out_inval;
}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 2e4804ef2fb9..6da8f6d05d39 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2377,7 +2377,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
}
}
- blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc));
+ blk_add_driver_data(scsi_cmd_to_rq(scsi), &blktrc, sizeof(blktrc));
}
/**
@@ -2599,8 +2599,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
io->fcp_cmnd_length = FCP_CMND_LEN;
if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
- io->data_block_length = scsi_cmnd->device->sector_size;
- io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
+ io->data_block_length = scsi_prot_interval(scsi_cmnd);
+ io->ref_tag_value = scsi_prot_ref_tag(scsi_cmnd);
}
if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
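From the zfcp hunk above onward, a large share of the SCSI changes are one mechanical conversion: open-coded scmd->request accesses become scsi_cmd_to_rq(scmd), giving the midlayer a single place to later change how a command maps to its request. At this point in the series the helper is just a thin accessor, along these lines:

static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
	return scmd->request;
}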
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 1c6b4e672687..a12e3525977d 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1823,7 +1823,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
SCp->device->simple_tags) {
- slot->tag = SCp->request->tag;
+ slot->tag = scsi_cmd_to_rq(SCp)->tag;
CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
slot->tag, slot);
} else {
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index adddcd589941..40088dcb98cd 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -1711,7 +1711,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
blogic_info(" DMA Channel: None, ", adapter);
if (adapter->bios_addr > 0)
- blogic_info("BIOS Address: 0x%lX, ", adapter,
+ blogic_info("BIOS Address: 0x%X, ", adapter,
adapter->bios_addr);
else
blogic_info("BIOS Address: None, ", adapter);
@@ -3436,7 +3436,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
int len = 0;
va_start(args, adapter);
- len = vsprintf(buf, fmt, args);
+ len = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (msglevel == BLOGIC_ANNOUNCE_LEVEL) {
static int msglines = 0;
@@ -3451,7 +3451,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
if (buf[0] != '\n' || len > 1)
printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
} else
- printk("%s", buf);
+ pr_cont("%s", buf);
} else {
if (begin) {
if (adapter != NULL && adapter->adapter_initd)
@@ -3459,7 +3459,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
else
printk("%s%s", blogic_msglevelmap[msglevel], buf);
} else
- printk("%s", buf);
+ pr_cont("%s", buf);
}
begin = (buf[len - 1] == '\n');
}
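The BusLogic changes swap the unbounded vsprintf() for vscnprintf(), which both respects the buffer size and returns the number of characters actually stored, and use pr_cont() for continuation lines so a new log prefix is not injected mid-message. A userspace analogue of the bounds fix:

#include <stdarg.h>
#include <stdio.h>

/* Behaves like the kernel's vscnprintf(): bounded, returns the stored length. */
static int msg(char *buf, size_t len, const char *fmt, ...)
{
	va_list args;
	int n;

	va_start(args, fmt);
	n = vsnprintf(buf, len, fmt, args);	/* never writes past len */
	va_end(args);
	return n < (int)len ? n : (int)len - 1;
}

int main(void)
{
	char buf[8];

	printf("%d '%s'\n", msg(buf, sizeof(buf), "%s", "overlong text"), buf);
	return 0;
}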
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8f44d433e06e..6e3a04107bb6 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -14,12 +14,16 @@ config RAID_ATTRS
help
Provides RAID
+config SCSI_COMMON
+ tristate
+
config SCSI
tristate "SCSI device support"
depends on BLOCK
select SCSI_DMA if HAS_DMA
select SG_POOL
- select BLK_SCSI_REQUEST
+ select SCSI_COMMON
+ select BLK_DEV_BSG_COMMON if BLK_DEV_BSG
help
If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
any other SCSI device under Linux, say Y and make sure that you know
@@ -140,6 +144,18 @@ config CHR_DEV_SG
If unsure, say N.
+config BLK_DEV_BSG
+ bool "/dev/bsg support (SG v4)"
+ depends on SCSI
+ default y
+ help
+ Saying Y here will enable generic SG (SCSI generic) v4 support
+ for any SCSI device.
+
+ This option is required by UDEV to access device serial numbers, etc.
+
+ If unsure, say Y.
+
config CHR_DEV_SCH
tristate "SCSI media changer support"
depends on SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1748d1ec1338..f086eca2bcd7 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -20,7 +20,7 @@ CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_SCSI) += scsi_mod.o
-obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_common.o
+obj-$(CONFIG_SCSI_COMMON) += scsi_common.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
@@ -168,6 +168,7 @@ scsi_mod-$(CONFIG_BLK_DEBUG_FS) += scsi_debugfs.o
scsi_mod-y += scsi_trace.o scsi_logging.o
scsi_mod-$(CONFIG_PM) += scsi_pm.o
scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o
+scsi_mod-$(CONFIG_BLK_DEV_BSG) += scsi_bsg.o
hv_storvsc-y := storvsc_drv.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 3baadd068768..a85589a2a8af 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -778,7 +778,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
}
#ifdef CONFIG_SUN3
- if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
+ if (sun3scsi_dma_finish(hostdata->connected->sc_data_direction)) {
pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
instance->host_no);
BUG();
@@ -1710,7 +1710,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
count = sun3scsi_dma_xfer_len(hostdata, cmd);
if (count > 0) {
- if (rq_data_dir(cmd->request))
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
cmd->SCp.ptr, count);
else
@@ -2158,7 +2158,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
count = sun3scsi_dma_xfer_len(hostdata, tmp);
if (count > 0) {
- if (rq_data_dir(tmp->request))
+ if (tmp->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
tmp->SCp.ptr, count);
else
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 46b8dffce2dd..c2d6f0a9e0b1 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -25,7 +25,6 @@
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
-#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
#include <asm/unaligned.h>
@@ -1505,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->id = cpu_to_le32(scmd_id(cmd));
srbcmd->lun = cpu_to_le32(cmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
- timeout = cmd->request->timeout/HZ;
+ timeout = scsi_cmd_to_rq(cmd)->timeout / HZ;
if (timeout == 0)
timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 54eb4d41bc2c..deb32c9f4b3e 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -224,7 +224,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
struct fib *fibptr;
- fibptr = &dev->fibs[scmd->request->tag];
+ fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
/*
* Null out fields that depend on being zero at the start of
* each I/O
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index f3377e2ef5fb..ffb391967573 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7423,7 +7423,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
* Set the srb_tag to the command tag + 1, as
* srb_tag '0' is used internally by the chip.
*/
- srb_tag = scp->request->tag + 1;
+ srb_tag = scsi_cmd_to_rq(scp)->tag + 1;
asc_scsi_q->q2.srb_tag = srb_tag;
/*
@@ -7637,7 +7637,7 @@ static int
adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
adv_req_t **adv_reqpp)
{
- u32 srb_tag = scp->request->tag;
+ u32 srb_tag = scsi_cmd_to_rq(scp)->tag;
adv_req_t *reqp;
ADV_SCSI_REQ_Q *scsiqp;
int ret;
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 1210e61afb18..584a59522038 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -262,11 +262,12 @@ static void aha1542_free_cmd(struct scsi_cmnd *cmd)
struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ struct request *rq = scsi_cmd_to_rq(cmd);
void *buf = acmd->data_buffer;
struct req_iterator iter;
struct bio_vec bv;
- rq_for_each_segment(bv, cmd->request, iter) {
+ rq_for_each_segment(bv, rq, iter) {
memcpy_to_page(bv.bv_page, bv.bv_offset, buf,
bv.bv_len);
buf += bv.bv_len;
@@ -447,11 +448,12 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
#endif
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ struct request *rq = scsi_cmd_to_rq(cmd);
void *buf = acmd->data_buffer;
struct req_iterator iter;
struct bio_vec bv;
- rq_for_each_segment(bv, cmd->request, iter) {
+ rq_for_each_segment(bv, rq, iter) {
memcpy_from_page(buf, bv.bv_page, bv.bv_offset,
bv.bv_len);
buf += bv.bv_len;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 462717bbb5b7..4e899ec1477d 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -235,8 +235,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
wrb = alloc_mcc_wrb(phba, &tag);
if (!wrb) {
mutex_unlock(&ctrl->mbox_lock);
- rc = -ENOMEM;
- goto free_cmd;
+ return -ENOMEM;
}
sge = nonembedded_sgl(wrb);
@@ -269,24 +268,6 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
/* copy the response, if any */
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
- /**
- * This is special case of NTWK_GET_IF_INFO where the size of
- * response is not known. beiscsi_if_get_info checks the return
- * value to free DMA buffer.
- */
- if (rc == -EAGAIN)
- return rc;
-
- /**
- * If FW is busy that is driver timed out, DMA buffer is saved with
- * the tag, only when the cmd completes this buffer is freed.
- */
- if (rc == -EBUSY)
- return rc;
-
-free_cmd:
- dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
- nonemb_cmd->va, nonemb_cmd->dma);
return rc;
}
@@ -309,6 +290,19 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
return 0;
}
+static void beiscsi_free_nemb_cmd(struct beiscsi_hba *phba,
+ struct be_dma_mem *cmd, int rc)
+{
+ /*
+ * If FW is busy the DMA buffer is saved with the tag. When the cmd
+ * completes this buffer is freed.
+ */
+ if (rc == -EBUSY)
+ return;
+
+ dma_free_coherent(&phba->ctrl.pdev->dev, cmd->size, cmd->va, cmd->dma);
+}
+
static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
{
struct be_dma_mem *tag_mem;
@@ -344,8 +338,16 @@ int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
cpu_to_le32(set_eqd[i].delay_multiplier);
}
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd,
- __beiscsi_eq_delay_compl, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, __beiscsi_eq_delay_compl,
+ NULL, 0);
+ if (rc) {
+ /*
+ * Only free on failure; async commands complete with -EBUSY,
+ * and their completion handler frees the buffer for us.
+ */
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+ }
+ return rc;
}
/**
@@ -372,6 +374,7 @@ int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg)
req->hdr.version = 1;
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
&resp, sizeof(resp));
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -449,7 +452,9 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
req->ip_addr.ip_type = ip_type;
memcpy(req->ip_addr.addr, gw,
(ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+ rt_val = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rt_val);
+ return rt_val;
}
int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
@@ -499,8 +504,10 @@ int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
req = nonemb_cmd.va;
req->ip_type = ip_type;
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
- resp, sizeof(*resp));
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, resp,
+ sizeof(*resp));
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+ return rc;
}
static int
@@ -537,6 +544,7 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
"BG_%d : failed to clear IP: rc %d status %d\n",
rc, req->ip_params.ip_record.status);
}
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
return rc;
}
@@ -581,6 +589,7 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
if (req->ip_params.ip_record.status)
rc = -EINVAL;
}
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
return rc;
}
@@ -608,6 +617,7 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
reldhcp->interface_hndl = phba->interface_handle;
reldhcp->ip_type = ip_type;
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
if (rc < 0) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : failed to release existing DHCP: %d\n",
@@ -689,7 +699,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
dhcpreq->interface_hndl = phba->interface_handle;
dhcpreq->ip_type = ip_type;
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
-
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
exit:
kfree(if_info);
return rc;
@@ -762,11 +772,8 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : Memory Allocation Failure\n");
- /* Free the DMA memory for the IOCTL issuing */
- dma_free_coherent(&phba->ctrl.pdev->dev,
- nonemb_cmd.size,
- nonemb_cmd.va,
- nonemb_cmd.dma);
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd,
+ -ENOMEM);
return -ENOMEM;
}
@@ -781,15 +788,13 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
nonemb_cmd.va)->actual_resp_len;
ioctl_size += sizeof(struct be_cmd_req_hdr);
- /* Free the previous allocated DMA memory */
- dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
- nonemb_cmd.va,
- nonemb_cmd.dma);
-
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
/* Free the virtual memory */
kfree(*if_info);
- } else
+ } else {
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
break;
+ }
} while (true);
return rc;
}
@@ -806,8 +811,9 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
if (rc)
return rc;
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
- nic, sizeof(*nic));
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, nic, sizeof(*nic));
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+ return rc;
}
static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
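The be2iscsi rework centralizes a subtle ownership rule: beiscsi_exec_nemb_cmd() used to free the DMA buffer itself except in the -EBUSY/-EAGAIN special cases; now every caller pairs the call with beiscsi_free_nemb_cmd(), which skips the free only while the command is still in flight (-EBUSY) and the tagged completion path owns the buffer. The resulting caller-side pattern, as used in the hunks above:

/* Caller-side pattern after the refactor: */
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);	/* no-op when rc == -EBUSY */
return rc;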
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 43e8a1dafec0..5521469ce678 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1918,7 +1918,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_unlock(&session->back_lock);
- p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
+ p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(scsi_cmd_to_rq(sc)));
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
rc = -EINVAL;
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index fc7197abfcdf..27012908b586 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -618,6 +618,12 @@ ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
return 0;
}
+struct changer_element_status32 {
+ int ces_type;
+ compat_uptr_t ces_data;
+};
+#define CHIOGSTATUS32 _IOW('c', 8, struct changer_element_status32)
+
static long ch_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -748,7 +754,20 @@ static long ch_ioctl(struct file *file,
return ch_gstatus(ch, ces.ces_type, ces.ces_data);
}
+#ifdef CONFIG_COMPAT
+ case CHIOGSTATUS32:
+ {
+ struct changer_element_status32 ces32;
+ if (copy_from_user(&ces32, argp, sizeof(ces32)))
+ return -EFAULT;
+ if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
+ return -EINVAL;
+
+ return ch_gstatus(ch, ces32.ces_type,
+ compat_ptr(ces32.ces_data));
+ }
+#endif
case CHIOGELEM:
{
struct changer_get_element cge;
@@ -858,59 +877,11 @@ static long ch_ioctl(struct file *file,
}
default:
- return scsi_ioctl(ch->device, cmd, argp);
+ return scsi_ioctl(ch->device, NULL, file->f_mode, cmd, argp);
}
}
-#ifdef CONFIG_COMPAT
-
-struct changer_element_status32 {
- int ces_type;
- compat_uptr_t ces_data;
-};
-#define CHIOGSTATUS32 _IOW('c', 8,struct changer_element_status32)
-
-static long ch_ioctl_compat(struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- scsi_changer *ch = file->private_data;
- int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
- file->f_flags & O_NDELAY);
- if (retval)
- return retval;
-
- switch (cmd) {
- case CHIOGPARAMS:
- case CHIOGVPARAMS:
- case CHIOPOSITION:
- case CHIOMOVE:
- case CHIOEXCHANGE:
- case CHIOGELEM:
- case CHIOINITELEM:
- case CHIOSVOLTAG:
- /* compatible */
- return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
- case CHIOGSTATUS32:
- {
- struct changer_element_status32 ces32;
- unsigned char __user *data;
-
- if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
- return -EFAULT;
- if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
- return -EINVAL;
-
- data = compat_ptr(ces32.ces_data);
- return ch_gstatus(ch, ces32.ces_type, data);
- }
- default:
- return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg));
-
- }
-}
-#endif
-
/* ------------------------------------------------------------------------ */
static int ch_probe(struct device *dev)
@@ -1015,9 +986,7 @@ static const struct file_operations changer_fops = {
.open = ch_open,
.release = ch_release,
.unlocked_ioctl = ch_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ch_ioctl_compat,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
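With the CHIOGSTATUS32 translation folded into the native ioctl, ch can point .compat_ioctl at the generic compat_ptr_ioctl() helper, which exists for exactly this every-command-takes-a-pointer case; its entire job is the pointer conversion (body as defined in fs/ioctl.c):

long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}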
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 56b9ad0a1ca0..3b2eb6ce1fcf 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1786,7 +1786,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
struct csio_scsi_qset *sqset;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
- sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)];
+ sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];
nr = fc_remote_port_chkready(rport);
if (nr) {
@@ -1989,13 +1989,13 @@ inval_scmnd:
csio_info(hw,
"Aborted SCSI command to (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
- cmnd->request->tag);
+ scsi_cmd_to_rq(cmnd)->tag);
return SUCCESS;
} else {
csio_info(hw,
"Failed to abort SCSI command, (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
- cmnd->request->tag);
+ scsi_cmd_to_rq(cmnd)->tag);
return FAILED;
}
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 222593bc2afe..2f1894588e0b 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -433,7 +433,7 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
hwq = afu->hwq_rr_count++ % afu->num_hwqs;
break;
case HWQ_MODE_TAG:
- tag = blk_mq_unique_tag(scp->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
hwq = blk_mq_unique_tag_to_hwq(tag);
break;
case HWQ_MODE_CPU:
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index a18a4a08f049..7af96d14c9bc 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -652,7 +652,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
msg[2] = 0;
msg[3]= 0;
/* Add 1 to avoid firmware treating it as invalid command */
- msg[4] = cmd->request->tag + 1;
+ msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
if (pHba->host)
spin_lock_irq(pHba->host->host_lock);
rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -2236,7 +2236,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
msg[2] = 0;
/* Add 1 to avoid firmware treating it as invalid command */
- msg[3] = cmd->request->tag + 1;
+ msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
// Our cards use the transaction context as the tag for queueing
// Adaptec/DPT Private stuff
msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index e0d798d6baee..bb3b460dc0bc 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -780,7 +780,7 @@ efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
{
struct efct_lio_vport *lio_vport;
struct efct *efct;
- int ret = -1;
+ int ret;
u64 p_wwpn, npiv_wwpn, npiv_wwnn;
char *p, *pbuf, tmp[128];
struct efct_lio_vport_list_t *vport_list;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 762cc8bd2653..0f9cedf78872 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -107,7 +107,7 @@ static void fnic_cleanup_io(struct fnic *fnic);
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
struct scsi_cmnd *sc)
{
- u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
+ u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);
return &fnic->io_req_lock[hash];
}
@@ -390,7 +390,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
(rp->flags & FC_RP_FLAGS_RETRY))
exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
- fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
+ fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
0, exch_flags, io_req->sgl_cnt,
SCSI_SENSE_BUFFERSIZE,
io_req->sgl_list_pa,
@@ -422,6 +422,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
*/
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
+ const int tag = scsi_cmd_to_rq(sc)->tag;
struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
@@ -511,8 +512,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- sc->request->tag, sc, 0, sc->cmnd[0],
- sg_count, CMD_STATE(sc));
+ tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc));
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
@@ -571,7 +571,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
* refetch the pointer under the lock.
*/
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- sc->request->tag, sc, 0, 0, 0,
+ tag, sc, 0, 0, 0,
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
io_req = (struct fnic_io_req *)CMD_SP(sc);
CMD_SP(sc) = NULL;
@@ -603,8 +603,7 @@ out:
sc->cmnd[5]);
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- sc->request->tag, sc, io_req,
- sg_count, cmd_trace,
+ tag, sc, io_req, sg_count, cmd_trace,
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
/* if only we issued IO, will we have the io lock */
@@ -1364,6 +1363,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
bool reserved)
{
+ const int tag = scsi_cmd_to_rq(sc)->tag;
struct fnic *fnic = data;
struct fnic_io_req *io_req;
unsigned long flags = 0;
@@ -1371,7 +1371,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
unsigned long start_time = 0;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- io_lock = fnic_io_lock_tag(fnic, sc->request->tag);
+ io_lock = fnic_io_lock_tag(fnic, tag);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1413,7 +1413,7 @@ cleanup_scsi_cmd:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
- sc->request->tag, sc, (jiffies - start_time));
+ tag, sc, jiffies - start_time);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
@@ -1425,10 +1425,10 @@ cleanup_scsi_cmd:
if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
shost_printk(KERN_ERR, fnic->lport->host,
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
- sc->request->tag, sc);
+ tag, sc);
FNIC_TRACE(fnic_cleanup_io,
- sc->device->host->host_no, sc->request->tag, sc,
+ sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
@@ -1566,7 +1566,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
{
struct fnic_rport_abort_io_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
- int abt_tag = sc->request->tag;
+ int abt_tag = scsi_cmd_to_rq(sc)->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1727,6 +1727,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
*/
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
+ struct request *const rq = scsi_cmd_to_rq(sc);
struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
@@ -1741,7 +1742,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
struct abort_stats *abts_stats;
struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;
- int tag;
+ const int tag = rq->tag;
unsigned long abt_issued_time;
DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1757,7 +1758,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
term_stats = &fnic->fnic_stats.term_stats;
rport = starget_to_rport(scsi_target(sc->device));
- tag = sc->request->tag;
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
@@ -1842,8 +1842,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
- fc_lun.scsi_lun, io_req)) {
+ if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
+ io_req)) {
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
@@ -1943,8 +1943,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
}
fnic_abort_cmd_end:
- FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
- sc->request->tag, sc,
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
@@ -1994,7 +1993,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
/* fill in the lun info */
int_to_scsilun(sc->device->lun, &fc_lun);
- fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
+ fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
@@ -2025,7 +2024,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
struct scsi_device *lun_dev = iter_data->lun_dev;
- int abt_tag = sc->request->tag;
+ int abt_tag = scsi_cmd_to_rq(sc)->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -2206,14 +2205,15 @@ clean_pending_aborts_end:
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct request_queue *q = sc->request->q;
+ struct request *rq = scsi_cmd_to_rq(sc);
+ struct request_queue *q = rq->q;
struct request *dummy;
dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
if (IS_ERR(dummy))
return SCSI_NO_TAG;
- sc->tag = sc->request->tag = dummy->tag;
+ sc->tag = rq->tag = dummy->tag;
sc->host_scribble = (unsigned char *)dummy;
return dummy->tag;
@@ -2238,6 +2238,7 @@ fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
*/
int fnic_device_reset(struct scsi_cmnd *sc)
{
+ struct request *rq = scsi_cmd_to_rq(sc);
struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
@@ -2250,7 +2251,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
struct scsi_lun fc_lun;
struct fnic_stats *fnic_stats;
struct reset_stats *reset_stats;
- int tag = 0;
+ int tag = rq->tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
bool new_sc = 0;
@@ -2284,7 +2285,6 @@ int fnic_device_reset(struct scsi_cmnd *sc)
CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
/* Allocate tag if not present */
- tag = sc->request->tag;
if (unlikely(tag < 0)) {
/*
* Really should fix the midlayer to pass in a proper
@@ -2458,8 +2458,7 @@ fnic_device_reset_clean:
}
fnic_device_reset_end:
- FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
- sc->request->tag, sc,
+ FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 3a903e8e0384..9515c45affa5 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -185,7 +185,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
void *bitmap = hisi_hba->slot_index_tags;
if (scsi_cmnd)
- return scsi_cmnd->request->tag;
+ return scsi_cmd_to_rq(scsi_cmnd)->tag;
spin_lock(&hisi_hba->lock);
index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
@@ -449,7 +449,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
unsigned int dq_index;
u32 blk_tag;
- blk_tag = blk_mq_unique_tag(scmd->request);
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
*dq_pointer = dq = &hisi_hba->dq[dq_index];
} else {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a4885d03afe2..3ab669dc806f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1153,7 +1153,7 @@ static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
{
unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
unsigned int interval = scsi_prot_interval(scsi_cmnd);
- u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
+ u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd));
switch (prot_op) {
case SCSI_PROT_READ_INSERT:
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index f135a10f582b..3faa87fa296a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5686,7 +5686,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/* Get the ptr to our adapter structure out of cmd->host. */
h = sdev_to_hba(cmd->device);
- BUG_ON(cmd->request->tag < 0);
+ BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0);
dev = cmd->device->hostdata;
if (!dev) {
@@ -5729,7 +5729,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
* and is therefore a brand-new command.
*/
if (likely(cmd->retries == 0 &&
- !blk_rq_is_passthrough(cmd->request) &&
+ !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) &&
h->acciopath_status)) {
/* Submit with the retry_pending flag unset. */
rc = hpsa_ioaccel_submit(h, c, cmd, false);
@@ -5894,7 +5894,7 @@ static int hpsa_scsi_add_host(struct ctlr_info *h)
*/
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
- int idx = scmd->request->tag;
+ int idx = scsi_cmd_to_rq(scmd)->tag;
if (idx < 0)
return idx;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 935b01ee44b7..7fa5e64e38c3 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1926,7 +1926,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct ibmvfc_cmd *vfc_cmd;
struct ibmvfc_fcp_cmd_iu *iu;
struct ibmvfc_event *evt;
- u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
+ u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
u16 scsi_channel;
int rc;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e6a3eaaa57d9..50df7dd9cb91 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1072,7 +1072,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
- cmnd->request->timeout/HZ);
+ scsi_cmd_to_rq(cmnd)->timeout / HZ);
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 8b33c9871484..cdd94fb2aab7 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3735,7 +3735,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
scb->cmd.dcdb.segment_4G = 0;
scb->cmd.dcdb.enhanced_sg = 0;
- TimeOut = scb->scsi_cmd->request->timeout;
+ TimeOut = scsi_cmd_to_rq(scb->scsi_cmd)->timeout;
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
if (!scb->sg_len) {
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index e1ff79464131..fcaa84a3c210 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -341,7 +341,7 @@ static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
tc->reserved_E8_0 = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
- tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
+ tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd);
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_gen = 0;
}
@@ -369,7 +369,7 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
tc->app_tag_gen = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
- tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
+ tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd);
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_verify = 0;
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 052ee3a26f6e..c640535d1ac0 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -10,7 +10,6 @@ config SCSI_SAS_LIBSAS
tristate "SAS Domain Transport Attributes"
depends on SCSI
select SCSI_SAS_ATTRS
- select BLK_DEV_BSGLIB
help
This provides transport specific helpers for SAS drivers which
use the domain device construct (like the aic94xxx).
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index e63a54f5ab8c..9dc32736cf21 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -18,4 +18,4 @@ libsas-y += sas_init.o \
libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
-ccflags-y := -DDEBUG
+ccflags-y := -DDEBUG -I$(srctree)/drivers/scsi
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4aa1fda95f35..a315715b3622 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -20,8 +20,8 @@
#include <scsi/scsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
#include <scsi/scsi_eh.h>
static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
@@ -596,7 +596,7 @@ void sas_ata_task_abort(struct sas_task *task)
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
- blk_abort_request(qc->scsicmd->request);
+ blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
return;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 9f5068f3bcfb..12e1e36d7c04 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -16,7 +16,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
/* ---------- Basic task processing for discovery purposes ---------- */
@@ -461,7 +461,7 @@ static void sas_discover_domain(struct work_struct *work)
break;
#else
pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
- /* Fall through */
+ fallthrough;
#endif
/* Fall through - only for the #else condition above. */
default:
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index e00688540219..c2150a818423 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -18,7 +18,7 @@
#include <scsi/sas_ata.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static int sas_discover_expander(struct domain_device *dev);
static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index eca2a6bf3601..32cdc969b736 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -14,7 +14,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 phy_id)
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 2b0f98ca6ec3..80592f53017a 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -19,7 +19,7 @@
#include "sas_internal.h"
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 4ca4b1f30bd0..a0d592d11dfb 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -10,7 +10,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
/* ---------- Phy events ---------- */
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index e3d03d744713..67b429dcf1ff 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -10,7 +10,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
{
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index ee44a0d7730b..08ffb8788290 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -22,9 +22,9 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
-#include "../scsi_priv.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
+#include "scsi_priv.h"
#include <linux/err.h>
#include <linux/blkdev.h>
@@ -908,7 +908,7 @@ void sas_task_abort(struct sas_task *task)
if (dev_is_sata(task->dev))
sas_ata_task_abort(task);
else
- blk_abort_request(sc->request);
+ blk_abort_request(scsi_cmd_to_rq(sc));
}
int sas_slave_alloc(struct scsi_device *sdev)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 17028861234b..dd3ddfa5f761 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -922,7 +922,6 @@ struct lpfc_hba {
uint8_t wwpn[8];
uint32_t RandomData[7];
uint8_t fcp_embed_io;
- uint8_t nvme_support; /* Firmware supports NVME */
uint8_t nvmet_support; /* driver supports NVMET */
#define LPFC_NVMET_MAX_PORTS 32
uint8_t mds_diags_support;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index eb88aaaf36eb..869c2b6f1515 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4038,6 +4038,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
const char *val_buf = buf;
int err;
uint32_t prev_val;
+ u8 sli_family, if_type;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
nolip = 1;
@@ -4061,13 +4062,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
/*
* The 'topology' is not a configurable parameter if :
* - persistent topology enabled
- * - G7/G6 with no private loop support
+ * - ASIC_GEN_NUM >= 0xC, with no private loop support
*/
-
+ sli_family = bf_get(lpfc_sli_intf_sli_family,
+ &phba->sli4_hba.sli_intf);
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
- (!phba->sli4_hba.pc_sli4_params.pls &&
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
+ (!phba->sli4_hba.pc_sli4_params.pls &&
+ (sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+ if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3114 Loop mode not supported\n");
@@ -5412,9 +5416,9 @@ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
/*
# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
-# is [0,1]. Default value is 0.
+# is [0,1]. Default value is 1.
*/
-LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
+LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
/*
@@ -6741,6 +6745,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
case LPFC_LINK_SPEED_128GHZ:
fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
break;
+ case LPFC_LINK_SPEED_256GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_256GBIT;
+ break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 737483c3f01d..41e0d8ef015a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -87,6 +87,8 @@ void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
+void lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_set_disctmo(struct lpfc_vport *);
int lpfc_can_disctmo(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 610b6dabb3b5..a1c85fa135a9 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2846,6 +2846,8 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (phba->lmt & LMT_256Gb)
+ ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_128Gb)
ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_64Gb)
@@ -2927,6 +2929,9 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
case LPFC_LINK_SPEED_128GHZ:
ae->un.AttrInt = HBA_PORTSPEED_128GFC;
break;
+ case LPFC_LINK_SPEED_256GHZ:
+ ae->un.AttrInt = HBA_PORTSPEED_256GFC;
+ break;
default:
ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
break;
@@ -3884,9 +3889,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
* lpfc_vmid_cmd - Build and send a FDMI cmd to the specified NPort
* @vport: pointer to a host virtual N_Port data structure.
- * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID)
- * cmdcode: FDMI command to send
- * mask: Mask of HBA or PORT Attributes to send
+ * @cmdcode: application server command code to send
+ * @vmid: pointer to vmid info structure
*
* Builds and sends a FDMI command using the CT subsystem.
*/
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 131374a61d7e..871b665bd72e 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -78,10 +78,11 @@ struct lpfc_node_rrqs {
};
enum lpfc_fc4_xpt_flags {
- NLP_WAIT_FOR_UNREG = 0x1,
- SCSI_XPT_REGD = 0x2,
- NVME_XPT_REGD = 0x4,
- NLP_XPT_HAS_HH = 0x8,
+ NLP_XPT_REGD = 0x1,
+ SCSI_XPT_REGD = 0x2,
+ NVME_XPT_REGD = 0x4,
+ NVME_XPT_UNREG_WAIT = 0x8,
+ NLP_XPT_HAS_HH = 0x10
};
struct lpfc_nodelist {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e481f5fe29d7..08ae2b12b92c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1664,6 +1664,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (!new_ndlp || (new_ndlp == ndlp))
return ndlp;
+ /*
+ * Unregister from the backend if not done yet. This could have
+ * been skipped due to ADISC.
+ */
+ lpfc_nlp_unreg_node(vport, new_ndlp);
+
if (phba->sli_rev == LPFC_SLI_REV4) {
active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
GFP_KERNEL);
@@ -2025,9 +2031,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp))
- goto check_plogi;
- else
+ if (!lpfc_error_lost_link(irsp))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
@@ -2080,7 +2084,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
NLP_EVT_CMPL_PLOGI);
}
- check_plogi:
if (disc && vport->num_disc_nodes) {
/* Check to see if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
@@ -2607,6 +2610,14 @@ lpfc_adisc_done(struct lpfc_vport *vport)
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
+
+ /*
+ * If the link is down, clear_la and reg_vpi will be done after
+ * the flogi that follows a link-up event
+ */
+ if (!lpfc_is_link_up(phba))
+ return;
+
/* The ADISCs are complete. Doesn't matter if they
* succeeded or failed because the ADISC completion
* routine guarantees to call the state machine and
@@ -2749,12 +2760,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"2755 ADISC failure DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
- /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp))
- goto check_adisc;
- else
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_ADISC);
+
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
/* As long as this node is not registered with the SCSI or NVMe
* transport, it is no longer an active node. Otherwise
@@ -2772,7 +2780,6 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
- check_adisc:
/* Check to see if there are more ADISCs to be sent */
if (disc && vport->num_disc_nodes)
lpfc_more_adisc(vport);
@@ -3375,6 +3382,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
if (rc) {
+ lpfc_els_free_iocb(phba, elsiocb);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0937 %s: Failed to reg fc node, rc %d\n",
__func__, rc);
@@ -3413,7 +3421,6 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
return 1;
}
- /* Keep the ndlp just in case RDF is being sent */
return 0;
}
@@ -3657,28 +3664,15 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
lpfc_enqueue_node(vport, ndlp);
}
- /* RDF ELS is not required on an NPIV VN_Port. */
- if (vport->port_type == LPFC_NPIV_PORT) {
- lpfc_nlp_put(ndlp);
+ /* RDF ELS is not required on an NPIV VN_Port. */
+ if (vport->port_type == LPFC_NPIV_PORT)
return -EACCES;
- }
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RDF);
if (!elsiocb)
return -ENOMEM;
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
- "0939 %s: FC_NODE x%x RPI x%x flag x%x "
- "ste x%x type x%x Not registered\n",
- __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
- ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_type);
- return -ENODEV;
- }
-
/* Configure the payload for the supported FPIN events. */
prdf = (struct lpfc_els_rdf_req *)
(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
@@ -4378,7 +4372,7 @@ out_retry:
(cmd == ELS_CMD_NVMEPRLI))
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PRLI_ISSUE);
- else
+ else if (cmd != ELS_CMD_ADISC)
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_NPR_NODE);
ndlp->nlp_last_elscmd = cmd;
@@ -4612,6 +4606,15 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+
+ /* If PLOGI is being retried, PLOGI completion will clean up the
+ * node. The NLP_NPR_2B_DISC flag needs to be retained to make
+ * progress on nodes discovered from the last RSCN.
+ */
+ if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
+ goto out;
+
/* NPort Recovery mode or node is just allocated */
if (!lpfc_nlp_not_used(ndlp)) {
/* A LOGO is completing and the node is in NPR state.
@@ -5657,25 +5660,40 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS ADISCs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
- (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
- (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(&ndlp->lock);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(vport, ndlp, 0);
- sentadisc++;
- vport->num_disc_nodes++;
- if (vport->num_disc_nodes >=
- vport->cfg_discovery_threads) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
- break;
- }
+
+ if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
+ !(ndlp->nlp_flag & NLP_NPR_ADISC))
+ continue;
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(&ndlp->lock);
+
+ if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+ /* This node was marked for ADISC but was not picked
+ * for discovery. This is possible if the node was
+ * missing from the GID_FT response.
+ *
+ * At the time the node was marked for ADISC, we skipped the
+ * unreg from the backend.
+ */
+ lpfc_nlp_unreg_node(vport, ndlp);
+ continue;
}
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
+ sentadisc++;
+ vport->num_disc_nodes++;
+ if (vport->num_disc_nodes >=
+ vport->cfg_discovery_threads) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
+ break;
+ }
+
}
if (sentadisc == 0) {
spin_lock_irq(shost->host_lock);
@@ -6087,6 +6105,12 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
case LPFC_LINK_SPEED_64GHZ:
rdp_speed = RDP_PS_64GB;
break;
+ case LPFC_LINK_SPEED_128GHZ:
+ rdp_speed = RDP_PS_128GB;
+ break;
+ case LPFC_LINK_SPEED_256GHZ:
+ rdp_speed = RDP_PS_256GB;
+ break;
default:
rdp_speed = RDP_PS_UNKNOWN;
break;
@@ -6094,6 +6118,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
+ if (phba->lmt & LMT_256Gb)
+ rdp_cap |= RDP_PS_256GB;
if (phba->lmt & LMT_128Gb)
rdp_cap |= RDP_PS_128GB;
if (phba->lmt & LMT_64Gb)
@@ -6886,13 +6912,6 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
continue;
}
- /* Check to see if we need to NVME rescan this target
- * remoteport.
- */
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
- ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
- lpfc_nvme_rescan_port(vport, ndlp);
-
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -8948,6 +8967,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+ if (newnode)
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_PRLO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7cc5920979f8..6da2daf7d9e3 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -3331,6 +3331,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
case LPFC_LINK_SPEED_32GHZ:
case LPFC_LINK_SPEED_64GHZ:
case LPFC_LINK_SPEED_128GHZ:
+ case LPFC_LINK_SPEED_256GHZ:
break;
default:
phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
@@ -4501,10 +4502,152 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
spin_unlock_irqrestore(shost->host_lock, iflags);
}
+/* Register a node with the backend if not already done */
+void
+lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+
+ unsigned long iflags;
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+ /* Already registered with backend, trigger rescan */
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
+ lpfc_nvme_rescan_port(vport, ndlp);
+ }
+ return;
+ }
+
+ ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (lpfc_valid_xpt_node(ndlp)) {
+ vport->phba->nport_event_cnt++;
+ /*
+ * Tell the fc transport about the port, if we haven't
+ * already. If we have, and it's a scsi entity, be
+ * sure to unblock any attached scsi devices
+ */
+ lpfc_register_remote_port(vport, ndlp);
+ }
+
+ /* We are done if we do not have any NVME remote node */
+ if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
+ return;
+
+ /* Notify the NVME transport of this new rport. */
+ if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+ ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+ if (vport->phba->nvmet_support == 0) {
+ /* Register this rport with the transport.
+ * Only NVME Target Rports are registered with
+ * the transport.
+ */
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ }
+ } else {
+ /* Just take an NDLP ref count since the
+ * target does not register rports.
+ */
+ lpfc_nlp_get(ndlp);
+ }
+ }
+}
+
+/* Unregister a node from the backend, if it was registered */
+void
+lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ return;
+ }
+
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (ndlp->rport &&
+ ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
+ vport->phba->nport_event_cnt++;
+ lpfc_unregister_remote_port(ndlp);
+ }
+
+ if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
+ vport->phba->nport_event_cnt++;
+ if (vport->phba->nvmet_support == 0) {
+ /* Start devloss if target. */
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ lpfc_nvme_unregister_port(vport, ndlp);
+ } else {
+ /* NVMET has no upcall. */
+ lpfc_nlp_put(ndlp);
+ }
+ }
+
+}
+
+/*
+ * ADISC state change handling
+ */
+static void
+lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int new_state)
+{
+ switch (new_state) {
+ /*
+ * Any state to ADISC_ISSUE
+ * Do nothing, adisc cmpl handling will trigger state changes
+ */
+ case NLP_STE_ADISC_ISSUE:
+ break;
+
+ /*
+ * ADISC_ISSUE to mapped states
+ * Trigger a registration with the backend; it is a no-op if
+ * already registered
+ */
+ case NLP_STE_UNMAPPED_NODE:
+ ndlp->nlp_type |= NLP_FC_NODE;
+ fallthrough;
+ case NLP_STE_MAPPED_NODE:
+ ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+ lpfc_nlp_reg_node(vport, ndlp);
+ break;
+
+ /*
+ * ADISC_ISSUE to non-mapped states
+ * We are moving from ADISC_ISSUE to a non-mapped state because
+ * ADISC failed; we would have skipped unregistering with the
+ * backend, so attempt it now
+ */
+ case NLP_STE_NPR_NODE:
+ ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
+ fallthrough;
+ default:
+ lpfc_nlp_unreg_node(vport, ndlp);
+ break;
+ }
+
+}
+
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
+ /* Trap ADISC changes here */
+ if (new_state == NLP_STE_ADISC_ISSUE ||
+ old_state == NLP_STE_ADISC_ISSUE) {
+ lpfc_handle_adisc_state(vport, ndlp, new_state);
+ return;
+ }
+
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
ndlp->nlp_type |= NLP_FC_NODE;
@@ -4514,60 +4657,17 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_NPR_NODE)
ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
- /* FCP and NVME Transport interface */
+ /* Reg/Unreg for FCP and NVME Transport interface */
if ((old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
- if (ndlp->rport &&
- lpfc_valid_xpt_node(ndlp)) {
- vport->phba->nport_event_cnt++;
- lpfc_unregister_remote_port(ndlp);
- }
-
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
- vport->phba->nport_event_cnt++;
- if (vport->phba->nvmet_support == 0) {
- /* Start devloss if target. */
- if (ndlp->nlp_type & NLP_NVME_TARGET)
- lpfc_nvme_unregister_port(vport, ndlp);
- } else {
- /* NVMET has no upcall. */
- lpfc_nlp_put(ndlp);
- }
- }
+ /* For nodes marked for ADISC, handle the unreg in ADISC cmpl */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+ lpfc_nlp_unreg_node(vport, ndlp);
}
- /* FCP and NVME Transport interfaces */
-
if (new_state == NLP_STE_MAPPED_NODE ||
- new_state == NLP_STE_UNMAPPED_NODE) {
- if (lpfc_valid_xpt_node(ndlp)) {
- vport->phba->nport_event_cnt++;
- /*
- * Tell the fc transport about the port, if we haven't
- * already. If we have, and it's a scsi entity, be
- * sure to unblock any attached scsi devices
- */
- lpfc_register_remote_port(vport, ndlp);
- }
- /* Notify the NVME transport of this new rport. */
- if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
- ndlp->nlp_fc4_type & NLP_FC4_NVME) {
- if (vport->phba->nvmet_support == 0) {
- /* Register this rport with the transport.
- * Only NVME Target Rports are registered with
- * the transport.
- */
- if (ndlp->nlp_type & NLP_NVME_TARGET) {
- vport->phba->nport_event_cnt++;
- lpfc_nvme_register_port(vport, ndlp);
- }
- } else {
- /* Just take an NDLP ref count since the
- * target does not register rports.
- */
- lpfc_nlp_get(ndlp);
- }
- }
- }
+ new_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nlp_reg_node(vport, ndlp);
if ((new_state == NLP_STE_MAPPED_NODE) &&
(vport->stat_data_enabled)) {
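The new lpfc_nlp_reg_node()/lpfc_nlp_unreg_node() helpers centralize transport (un)registration and are made idempotent by the NLP_XPT_REGD bit, tested and flipped under ndlp->lock; the PLOGI-confirm, LOGO-receive and ADISC paths can therefore call them opportunistically without tracking who registered first. A standalone sketch of that flag-guarded pattern (hypothetical types, not lpfc code):

    #include <linux/spinlock.h>

    #define XPT_REGD	0x1		/* hypothetical "registered" bit */

    struct node {
            spinlock_t lock;
            unsigned int flags;
    };

    static void node_reg(struct node *n)
    {
            unsigned long iflags;

            spin_lock_irqsave(&n->lock, iflags);
            if (n->flags & XPT_REGD) {
                    /* Already registered: repeat calls are no-ops */
                    spin_unlock_irqrestore(&n->lock, iflags);
                    return;
            }
            n->flags |= XPT_REGD;	/* claim the transition before unlocking */
            spin_unlock_irqrestore(&n->lock, iflags);

            /* ... perform the actual, possibly sleeping, registration ... */
    }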
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4a5a85ed42ec..4083764916a5 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1694,6 +1694,7 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
#define PCI_DEVICE_ID_LANCER_G6_FC 0xe300
#define PCI_DEVICE_ID_LANCER_G7_FC 0xf400
+#define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index eb8c735a243b..aadbb0de629d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -94,6 +94,9 @@ struct lpfc_sli_intf {
#define LPFC_SLI_INTF_FAMILY_BE3 0x1
#define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa
#define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb
+#define LPFC_SLI_INTF_FAMILY_G6 0xc
+#define LPFC_SLI_INTF_FAMILY_G7 0xd
+#define LPFC_SLI_INTF_FAMILY_G7P 0xe
#define lpfc_sli_intf_slirev_SHIFT 4
#define lpfc_sli_intf_slirev_MASK 0x0000000F
#define lpfc_sli_intf_slirev_WORD word0
@@ -959,6 +962,12 @@ union lpfc_sli4_cfg_shdr {
#define lpfc_mbox_hdr_add_status_SHIFT 8
#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
#define lpfc_mbox_hdr_add_status_WORD word7
+#define LPFC_ADD_STATUS_INCOMPAT_OBJ 0xA2
+#define lpfc_mbox_hdr_add_status_2_SHIFT 16
+#define lpfc_mbox_hdr_add_status_2_MASK 0x000000FF
+#define lpfc_mbox_hdr_add_status_2_WORD word7
+#define LPFC_ADD_STATUS_2_INCOMPAT_FLASH 0x01
+#define LPFC_ADD_STATUS_2_INCORRECT_ASIC 0x02
uint32_t response_length;
uint32_t actual_response_length;
} response;
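The _SHIFT/_MASK/_WORD triplets added here (and the cfg_pbde, flash_id and SLI-family ones elsewhere in this series) are consumed by lpfc's token-pasting bitfield macros, defined near the top of lpfc_hw4.h roughly as:

    #define bf_get(name, ptr) \
            (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
    #define bf_set(name, ptr, value) \
            ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
             ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

    /* Hypothetical usage, extracting the new byte from word7:
     *	add2 = bf_get(lpfc_mbox_hdr_add_status_2, &shdr->response);
     */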
@@ -1555,7 +1564,7 @@ struct rq_context {
#define lpfc_rq_context_hdr_size_WORD word1
uint32_t word2;
#define lpfc_rq_context_cq_id_SHIFT 16
-#define lpfc_rq_context_cq_id_MASK 0x000003FF
+#define lpfc_rq_context_cq_id_MASK 0x0000FFFF
#define lpfc_rq_context_cq_id_WORD word2
#define lpfc_rq_context_buf_size_SHIFT 0
#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
@@ -3328,17 +3337,20 @@ struct lpfc_sli4_parameters {
#define cfg_nosr_SHIFT 9
#define cfg_nosr_MASK 0x00000001
#define cfg_nosr_WORD word19
-
#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
-#define cfg_pvl_SHIFT 13
-#define cfg_pvl_MASK 0x00000001
-#define cfg_pvl_WORD word19
#define cfg_nsler_SHIFT 12
#define cfg_nsler_MASK 0x00000001
#define cfg_nsler_WORD word19
+#define cfg_pvl_SHIFT 13
+#define cfg_pvl_MASK 0x00000001
+#define cfg_pvl_WORD word19
+
+#define cfg_pbde_SHIFT 20
+#define cfg_pbde_MASK 0x00000001
+#define cfg_pbde_WORD word19
uint32_t word20;
#define cfg_max_tow_xri_SHIFT 0
@@ -3603,6 +3615,9 @@ struct lpfc_controller_attribute {
#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
+#define lpfc_cntl_attr_flash_id_SHIFT 16
+#define lpfc_cntl_attr_flash_id_MASK 0x000000ff
+#define lpfc_cntl_attr_flash_id_WORD word17
uint32_t mbx_da_struct_ver;
uint32_t ep_fw_da_struct_ver;
uint32_t ncsi_ver_str[3];
@@ -4707,6 +4722,7 @@ union lpfc_wqe128 {
#define MAGIC_NUMBER_G6 0xFEAA0003
#define MAGIC_NUMBER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G7P 0xFEAA0020
struct lpfc_grp_hdr {
uint32_t size;
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index d48414e295a0..6a90e6e53d09 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -118,6 +118,8 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC,
+ PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5983e05b648f..2c0aaa0a301d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1852,6 +1852,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
{
int rc;
uint32_t intr_mode;
+ LPFC_MBOXQ_t *mboxq;
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) {
@@ -1871,11 +1872,19 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
"Recovery...\n");
/* If we are no wait, the HBA has been reset and is not
- * functional, thus we should clear LPFC_SLI_ACTIVE flag.
+ * functional, thus we should clear
+ * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
*/
if (mbx_action == LPFC_MBX_NO_WAIT) {
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+ if (phba->sli.mbox_active) {
+ mboxq = phba->sli.mbox_active;
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ }
spin_unlock_irq(&phba->hbalock);
}
@@ -2590,6 +2599,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_LANCER_G7_FC:
m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
break;
+ case PCI_DEVICE_ID_LANCER_G7P_FC:
+ m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
+ break;
case PCI_DEVICE_ID_SKYHAWK:
case PCI_DEVICE_ID_SKYHAWK_VF:
oneConnect = 1;
@@ -3541,6 +3553,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(&ndlp->lock);
+
+ lpfc_unreg_rpi(vports[i], ndlp);
/*
* Whenever an SLI4 port goes offline, free the
* RPI. Get a new RPI when the adapter port
@@ -3556,7 +3570,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
- lpfc_unreg_rpi(vports[i], ndlp);
if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_disc_state_machine(vports[i], ndlp,
@@ -4666,6 +4679,8 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
if (phba->hba_flag & HBA_FCOE_MODE)
return;
+ if (phba->lmt & LMT_256Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
if (phba->lmt & LMT_128Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
if (phba->lmt & LMT_64Gb)
@@ -4845,7 +4860,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
/**
* lpfc_vmid_poll - VMID timeout detection
- * @ptr: Map to lpfc_hba data structure pointer.
+ * @t: Timer context used to obtain the pointer to lpfc hba data structure.
*
* This routine is invoked when there is no I/O on by a VM for the specified
* amount of time. When this situation is detected, the VMID has to be
@@ -5074,6 +5089,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
case LPFC_FC_LA_SPEED_128G:
port_speed = 128000;
break;
+ case LPFC_FC_LA_SPEED_256G:
+ port_speed = 256000;
+ break;
default:
port_speed = 0;
}
@@ -8537,9 +8555,12 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
}
/* FW supports persistent topology - override module parameter value */
phba->hba_flag |= HBA_PERSISTENT_TOPO;
- switch (phba->pcidev->device) {
- case PCI_DEVICE_ID_LANCER_G7_FC:
- case PCI_DEVICE_ID_LANCER_G6_FC:
+
+ /* if ASIC_GEN_NUM >= 0xC */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_6) ||
+ (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_G6)) {
if (!tf) {
phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
? FLAGS_TOPOLOGY_MODE_LOOP
@@ -8547,8 +8568,7 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
} else {
phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
}
- break;
- default: /* G5 */
+ } else { /* G5 */
if (tf) {
/* If topology failover set - pt is '0' or '1' */
phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
@@ -8558,7 +8578,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
? FLAGS_TOPOLOGY_MODE_PT_PT
: FLAGS_TOPOLOGY_MODE_LOOP);
}
- break;
}
if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -12241,7 +12260,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
bf_get(cfg_xib, mbx_sli4_parameters),
phba->cfg_enable_fc4_type);
fcponly:
- phba->nvme_support = 0;
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
phba->cfg_nvme_seg_cnt = 0;
@@ -12259,9 +12277,10 @@ fcponly:
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
- if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
+ /* Enable embedded Payload BDE if support is indicated */
+ if (bf_get(cfg_pbde, mbx_sli4_parameters))
+ phba->cfg_enable_pbde = 1;
+ else
phba->cfg_enable_pbde = 0;
/*
@@ -12299,7 +12318,7 @@ fcponly:
"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
bf_get(cfg_xib, mbx_sli4_parameters),
phba->cfg_enable_pbde,
- phba->fcp_embed_io, phba->nvme_support,
+ phba->fcp_embed_io, sli4_params->nvme,
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -12978,7 +12997,9 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
const struct firmware *fw)
{
int rc;
+ u8 sli_family;
+ sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
/* Three cases: (1) FW was not supported on the detected adapter.
* (2) FW update has been locked out administratively.
* (3) Some other error during FW update.
@@ -12986,10 +13007,12 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
* for admin diagnosis.
*/
if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+ (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
magic_number != MAGIC_NUMBER_G6) ||
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
- magic_number != MAGIC_NUMBER_G7)) {
+ (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
+ magic_number != MAGIC_NUMBER_G7) ||
+ (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
+ magic_number != MAGIC_NUMBER_G7P)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3030 This firmware version is not supported on"
" this HBA model. Device:%x Magic:%x Type:%x "
@@ -14040,17 +14063,18 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
- switch (phba->pcidev->device) {
- case PCI_DEVICE_ID_LANCER_G6_FC:
- case PCI_DEVICE_ID_LANCER_G7_FC:
+ /* if ASIC_GEN_NUM >= 0xC */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_6) ||
+ (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_G6)) {
phba->ras_fwlog.ras_hwsupport = true;
if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
phba->cfg_ras_fwlog_buffsize)
phba->ras_fwlog.ras_enabled = true;
else
phba->ras_fwlog.ras_enabled = false;
- break;
- default:
+ } else {
phba->ras_fwlog.ras_hwsupport = false;
}
}
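This is the same two-part test used in lpfc_map_topology() and lpfc_topology_store() above: if_type equal to IF_TYPE_6 or sli_family equal to FAMILY_G6 identifies ASIC generation 0xC or later, replacing the old PCI-device-ID switches. A hypothetical predicate (not part of the patch) states the intent:

    /* Hypothetical helper matching the open-coded checks in this series. */
    static inline bool lpfc_asic_gen_ge_0xc(struct lpfc_hba *phba)
    {
            return bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
                            LPFC_SLI_INTF_IF_TYPE_6 ||
                   bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
                            LPFC_SLI_INTF_FAMILY_G6;
    }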
@@ -14163,8 +14187,9 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
unsigned int temp_idx;
int i;
int j = 0;
- unsigned long rem_nsec;
- struct lpfc_vport **vports;
+ unsigned long rem_nsec, iflags;
+ bool log_verbose = false;
+ struct lpfc_vport *port_iterator;
/* Don't dump messages if we explicitly set log_verbose for the
* physical port or any vport.
@@ -14172,16 +14197,24 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
if (phba->cfg_log_verbose)
return;
- vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- if (vports[i]->cfg_log_verbose) {
- lpfc_destroy_vport_work_array(phba, vports);
+ spin_lock_irqsave(&phba->port_list_lock, iflags);
+ list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ if (port_iterator->load_flag & FC_UNLOADING)
+ continue;
+ if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
+ if (port_iterator->cfg_log_verbose)
+ log_verbose = true;
+
+ scsi_host_put(lpfc_shost_from_vport(port_iterator));
+
+ if (log_verbose) {
+ spin_unlock_irqrestore(&phba->port_list_lock,
+ iflags);
return;
}
}
}
- lpfc_destroy_vport_work_array(phba, vports);
+ spin_unlock_irqrestore(&phba->port_list_lock, iflags);
if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
return;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 84bc373190d8..6c754ee96bee 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -513,8 +513,9 @@ lpfc_init_link(struct lpfc_hba * phba,
break;
}
- if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ /* Topology handling for ASIC_GEN_NUM 0xC and later */
+ if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+ phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
!(phba->sli4_hba.pc_sli4_params.pls) &&
mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e12f83fb795c..27263f02ab9f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -736,9 +736,13 @@ out:
* is already in MAPPED or UNMAPPED state. Catch this
* condition and don't set the nlp_state again because
* it causes an unnecessary transport unregister/register.
+ *
+ * Nodes marked for ADISC will move to MAPPED or UNMAPPED state
+ * after issuing ADISC
*/
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
- if (ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
+ !(ndlp->nlp_flag & NLP_NPR_ADISC))
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_MAPPED_NODE);
}
@@ -863,6 +867,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}
out:
+ /* Unregister from the backend; this could have been skipped due to ADISC */
+ lpfc_nlp_unreg_node(vport, ndlp);
+
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -1677,9 +1684,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
- memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
- memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
-
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
lpfc_unreg_rpi(vport, ndlp);
@@ -2597,13 +2601,14 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
+ lpfc_disc_set_adisc(vport, ndlp);
+
ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(&ndlp->lock);
- lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2645,14 +2650,13 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
- if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- spin_unlock_irq(&ndlp->lock);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(vport, ndlp, 0);
- } else {
+ /*
+ * ADISC nodes will be handled in the regular discovery path after
+ * receiving a response from the NS.
+ *
+ * For other nodes, send PLOGI to trigger an implicit LOGO.
+ */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
@@ -2685,12 +2689,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
- if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(vport, ndlp, 0);
- } else {
+ /*
+ * ADISC nodes will be handled in the regular discovery path after
+ * receiving a response from the NS.
+ *
+ * For other nodes, send PLOGI to trigger an implicit LOGO.
+ */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index bcc804cefd30..f36294e9b5dd 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -216,8 +216,8 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
/* The register rebind might have occurred before the delete
* downcall. Guard against this race.
*/
- if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
- ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);
+ if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
+ ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);
spin_unlock_irq(&ndlp->lock);
@@ -2324,7 +2324,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* race that leaves the WAIT flag set.
*/
spin_lock_irq(&ndlp->lock);
- ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+ ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
spin_unlock_irq(&ndlp->lock);
rport = remote_port->private;
@@ -2336,7 +2336,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
spin_lock_irq(&ndlp->lock);
ndlp->nrport = NULL;
- ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+ ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
spin_unlock_irq(&ndlp->lock);
rport->ndlp = NULL;
rport->remoteport = NULL;
@@ -2488,7 +2488,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* The transport will update it.
*/
spin_lock_irq(&vport->phba->hbalock);
- ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
+ ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
spin_unlock_irq(&vport->phba->hbalock);
/* Don't let the host nvme transport keep sending keep-alives
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 69a5a844c69c..f61223fbe644 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -37,8 +37,8 @@
#define LPFC_MAX_NVME_INFO_TMP_LEN 100
#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
-#define lpfc_ndlp_get_nrport(ndlp) \
- ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)) \
+#define lpfc_ndlp_get_nrport(ndlp) \
+ ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))\
? NULL : ndlp->nrport)
struct lpfc_nvme_qhandle {
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index f2d9a3580887..6e3dd0b9bcfa 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1797,19 +1797,22 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
continue;
- spin_lock(&ctxp->ctxlock);
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+ iflag);
+
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
if (ctxp->flag & LPFC_NVME_CTX_RLS &&
!(ctxp->flag & LPFC_NVME_ABORT_OP)) {
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctxp->list);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
ctxp->flag &= ~LPFC_NVME_XBUSY;
- spin_unlock(&ctxp->ctxlock);
- spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
- iflag);
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
rrq_empty = list_empty(&phba->active_rrq_list);
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
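The reshuffle above reverses the lock nesting: ctxp->ctxlock is now the outer lock and abts_nvmet_buf_list_lock is re-acquired only around list_del_init(), so this path agrees on lock order with the free-context path. A standalone sketch of the resulting discipline (hypothetical types, not lpfc code):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct ctx {
            spinlock_t lock;		/* outer: per-context state */
            struct list_head node;
            bool needs_release;
    };

    static DEFINE_SPINLOCK(list_lock);	/* inner: guards the shared list */

    static void ctx_release(struct ctx *c)
    {
            unsigned long flags;

            spin_lock_irqsave(&c->lock, flags);
            if (c->needs_release) {
                    spin_lock(&list_lock);	/* taken only for the list op */
                    list_del_init(&c->node);
                    spin_unlock(&list_lock);
            }
            spin_unlock_irqrestore(&c->lock, flags);
    }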
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1b248c237be1..f905a53d050f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -683,7 +683,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
cpu = raw_smp_processor_id();
if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
- tag = blk_mq_unique_tag(cmnd->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
idx = blk_mq_unique_tag_to_hwq(tag);
} else {
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
@@ -1046,7 +1046,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return 0;
sgpe = scsi_prot_sglist(sc);
- lba = t10_pi_ref_tag(sc->request);
+ lba = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
if (lba == LPFC_INVALID_REFTAG)
return 0;
@@ -1629,7 +1629,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde*/
- reftag = t10_pi_ref_tag(sc->request);
+ reftag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
if (reftag == LPFC_INVALID_REFTAG)
goto out;
@@ -1792,7 +1792,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
- reftag = t10_pi_ref_tag(sc->request);
+ reftag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
if (reftag == LPFC_INVALID_REFTAG)
goto out;
@@ -2023,7 +2023,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde*/
- reftag = t10_pi_ref_tag(sc->request);
+ reftag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
if (reftag == LPFC_INVALID_REFTAG)
goto out;
@@ -2224,7 +2224,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
- reftag = t10_pi_ref_tag(sc->request);
+ reftag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
if (reftag == LPFC_INVALID_REFTAG)
goto out;
@@ -2818,7 +2818,7 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
chk_guard = 1;
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
- start_ref_tag = t10_pi_ref_tag(cmd->request);
+ start_ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(cmd));
if (start_ref_tag == LPFC_INVALID_REFTAG)
goto out;
start_app_tag = src->app_tag;
@@ -2910,7 +2910,7 @@ out:
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
- t10_pi_ref_tag(cmd->request),
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmd)),
sum, guard_tag);
} else if (err_type == BGS_REFTAG_ERR_MASK) {
@@ -2920,7 +2920,7 @@ out:
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
- t10_pi_ref_tag(cmd->request),
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmd)),
ref_tag, start_ref_tag);
} else if (err_type == BGS_APPTAG_ERR_MASK) {
@@ -2930,7 +2930,7 @@ out:
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9041 BLKGRD: reftag %x app_tag err %x != %x\n",
- t10_pi_ref_tag(cmd->request),
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmd)),
app_tag, start_app_tag);
}
}
@@ -2992,7 +2992,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
" 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
(unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
}
if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -3007,7 +3007,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
" 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
(unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
}
if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -3022,7 +3022,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
" 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
(unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
}
if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -3066,7 +3066,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
" 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
(unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
/* Calculate what type of error it was */
lpfc_calc_bg_err(phba, lpfc_cmd);
@@ -3103,8 +3103,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9072 BLKGRD: Invalid BG Profile in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmd)),
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
ret = (-1);
goto out;
}
@@ -3115,8 +3115,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9073 BLKGRD: Invalid BG PDIF Block in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmd)),
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
ret = (-1);
goto out;
}
@@ -3131,8 +3131,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9055 BLKGRD: Guard Tag error in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmd)),
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
}
if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -3146,8 +3146,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9056 BLKGRD: Ref Tag error in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmd)),
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
}
if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -3161,8 +3161,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9061 BLKGRD: App Tag error in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmd)),
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
}
if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -3205,8 +3205,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9057 BLKGRD: Unknown error in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmd)),
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm);
/* Calculate what type of error it was */
lpfc_calc_bg_err(phba, lpfc_cmd);
@@ -5029,12 +5029,8 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba)
}
/* Check for valid Emulex Device ID */
- switch (ptr->device) {
- case PCI_DEVICE_ID_LANCER_FC:
- case PCI_DEVICE_ID_LANCER_G6_FC:
- case PCI_DEVICE_ID_LANCER_G7_FC:
- break;
- default:
+ if (phba->sli_rev != LPFC_SLI_REV4 ||
+ phba->hba_flag & HBA_FCOE_MODE) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8347 Incapable PCI reset device: "
"0x%04x\n", ptr->device);
@@ -5423,13 +5419,9 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
*/
static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
{
- char *uuid = NULL;
+ struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
- if (cmd->request) {
- if (cmd->request->bio)
- uuid = blkcg_get_fc_appid(cmd->request->bio);
- }
- return uuid;
+ return bio ? blkcg_get_fc_appid(bio) : NULL;
}
/**
@@ -5557,8 +5549,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
"reftag x%x cnt %u pt %x\n",
dif_op_str[scsi_get_prot_op(cmnd)],
cmnd->cmnd[0],
- t10_pi_ref_tag(cmnd->request),
- blk_rq_sectors(cmnd->request),
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmnd)),
+ blk_rq_sectors(scsi_cmd_to_rq(cmnd)),
(cmnd->cmnd[1]>>5));
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -5569,8 +5561,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
"9038 BLKGRD: rcvd PROT_NORMAL cmd: "
"x%x reftag x%x cnt %u pt %x\n",
cmnd->cmnd[0],
- t10_pi_ref_tag(cmnd->request),
- blk_rq_sectors(cmnd->request),
+ t10_pi_ref_tag(scsi_cmd_to_rq(cmnd)),
+ blk_rq_sectors(scsi_cmd_to_rq(cmnd)),
(cmnd->cmnd[1]>>5));
}
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -5641,8 +5633,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
bf_get(wqe_tmo,
&lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
- (uint32_t)
- (cmnd->request->timeout / 1000));
+ (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
goto out_host_busy_free_buf;
}
@@ -6273,6 +6264,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_scsi_event_header scsi_event;
int status;
u32 logit = LOG_FCP;
+ u32 dev_loss_tmo = vport->cfg_devloss_tmo;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@@ -6314,39 +6306,44 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_TARGET_RESET);
- if (status != SUCCESS)
- logit = LOG_TRACE_EVENT;
- spin_lock_irqsave(&pnode->lock, flags);
- if (status != SUCCESS &&
- (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) &&
- !pnode->logo_waitq) {
- pnode->logo_waitq = &waitq;
- pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- pnode->nlp_flag |= NLP_ISSUE_LOGO;
- pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
- spin_unlock_irqrestore(&pnode->lock, flags);
- lpfc_unreg_rpi(vport, pnode);
- wait_event_timeout(waitq,
- (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)),
- msecs_to_jiffies(vport->cfg_devloss_tmo *
- 1000));
-
- if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0725 SCSI layer TGTRST failed & LOGO TMO "
- " (%d, %llu) return x%x\n", tgt_id,
- lun_id, status);
- spin_lock_irqsave(&pnode->lock, flags);
- pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+ if (status != SUCCESS) {
+ logit = LOG_TRACE_EVENT;
+
+ /* Issue LOGO if no LOGO is outstanding */
+ spin_lock_irqsave(&pnode->lock, flags);
+ if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) &&
+ !pnode->logo_waitq) {
+ pnode->logo_waitq = &waitq;
+ pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ pnode->nlp_flag |= NLP_ISSUE_LOGO;
+ pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ lpfc_unreg_rpi(vport, pnode);
+ wait_event_timeout(waitq,
+ (!(pnode->upcall_flags &
+ NLP_WAIT_FOR_LOGO)),
+ msecs_to_jiffies(dev_loss_tmo *
+ 1000));
+
+ if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
+ lpfc_printf_vlog(vport, KERN_ERR, logit,
+ "0725 SCSI layer TGTRST "
+ "failed & LOGO TMO (%d, %llu) "
+ "return x%x\n",
+ tgt_id, lun_id, status);
+ spin_lock_irqsave(&pnode->lock, flags);
+ pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+ } else {
+ spin_lock_irqsave(&pnode->lock, flags);
+ }
+ pnode->logo_waitq = NULL;
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ status = SUCCESS;
+
} else {
- spin_lock_irqsave(&pnode->lock, flags);
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ status = FAILED;
}
- pnode->logo_waitq = NULL;
- spin_unlock_irqrestore(&pnode->lock, flags);
- status = SUCCESS;
- } else {
- status = FAILED;
- spin_unlock_irqrestore(&pnode->lock, flags);
}
lpfc_printf_vlog(vport, KERN_ERR, logit,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index f76667b7da7b..3836d7f6a575 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -142,6 +142,10 @@ struct lpfc_scsicmd_bkt {
#define FC_PORTSPEED_128GBIT 0x2000
#endif
+#ifndef FC_PORTSPEED_256GBIT
+#define FC_PORTSPEED_256GBIT 0x4000
+#endif
+
#define TXRDY_PAYLOAD_LEN 12
/* For sysfs/debugfs tmp string max len */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f530d8fe7a8c..47dd13719901 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5674,16 +5674,20 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
phba->sli4_hba.lnk_info.lnk_no =
bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+ phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
+ phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
sizeof(phba->BIOSVersion));
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
+ "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
+ "flash_id: x%02x, asic_rev: x%02x\n",
phba->sli4_hba.lnk_info.lnk_tp,
phba->sli4_hba.lnk_info.lnk_no,
- phba->BIOSVersion);
+ phba->BIOSVersion, phba->sli4_hba.flash_id,
+ phba->sli4_hba.asic_rev);
out_free_mboxq:
if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -8790,8 +8794,11 @@ static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *mboxq;
int rc = 0;
unsigned long timeout = 0;
+ u32 sli_flag;
+ u8 cmd, subsys, opcode;
/* Mark the asynchronous mailbox command posting as blocked */
spin_lock_irq(&phba->hbalock);
@@ -8809,12 +8816,37 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
if (timeout)
lpfc_sli4_process_missed_mbox_completions(phba);
- /* Wait for the outstnading mailbox command to complete */
+ /* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
if (time_after(jiffies, timeout)) {
- /* Timeout, marked the outstanding cmd not complete */
+ /* Timeout, mark the outstanding cmd not complete */
+
+ /* Sanity check that sli.mbox_active has not completed or
+ * been cancelled from another context during the last 2ms
+ * sleep, so take hbalock to be sure before logging.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli.mbox_active) {
+ mboxq = phba->sli.mbox_active;
+ cmd = mboxq->u.mb.mbxCommand;
+ subsys = lpfc_sli_config_mbox_subsys_get(phba,
+ mboxq);
+ opcode = lpfc_sli_config_mbox_opcode_get(phba,
+ mboxq);
+ sli_flag = psli->sli_flag;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2352 Mailbox command x%x "
+ "(x%x/x%x) sli_flag x%x could "
+ "not complete\n",
+ cmd, subsys, opcode,
+ sli_flag);
+ } else {
+ spin_unlock_irq(&phba->hbalock);
+ }
+
rc = 1;
break;
}
@@ -10097,8 +10129,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- iocbq->context2)->virt);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
@@ -11619,6 +11649,7 @@ void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
IOCB_t *irsp = &rspiocb->iocb;
/* ELS cmd tag <ulpIoTag> completes */
@@ -11627,11 +11658,16 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
- lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
+ /*
+ * Drop the ndlp reference after free_iocb, because sli_release_iocb
+ * may still access the ndlp if the exchange is busy.
+ */
if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
else
lpfc_els_free_iocb(phba, cmdiocb);
+
+ lpfc_nlp_put(ndlp);
}
/**
@@ -20021,6 +20057,91 @@ out:
}
/**
+ * lpfc_log_fw_write_cmpl - logs firmware write completion status
+ * @phba: pointer to lpfc hba data structure
+ * @shdr_status: wr_object rsp's status field
+ * @shdr_add_status: wr_object rsp's add_status field
+ * @shdr_add_status_2: wr_object rsp's add_status_2 field
+ * @shdr_change_status: wr_object rsp's change_status field
+ * @shdr_csf: wr_object rsp's csf bit
+ *
+ * This routine is intended to be called after a firmware write completes.
+ * It logs the next action the user must take to instantiate the newly
+ * downloaded firmware, or the reason the image is incompatible.
+ **/
+static void
+lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
+ u32 shdr_add_status, u32 shdr_add_status_2,
+ u32 shdr_change_status, u32 shdr_csf)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "4198 %s: flash_id x%02x, asic_rev x%02x, "
+ "status x%02x, add_status x%02x, add_status_2 x%02x, "
+ "change_status x%02x, csf %01x\n", __func__,
+ phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
+ shdr_status, shdr_add_status, shdr_add_status_2,
+ shdr_change_status, shdr_csf);
+
+ if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
+ switch (shdr_add_status_2) {
+ case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "4199 Firmware write failed: "
+ "image incompatible with flash x%02x\n",
+ phba->sli4_hba.flash_id);
+ break;
+ case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "4200 Firmware write failed: "
+ "image incompatible with ASIC "
+ "architecture x%02x\n",
+ phba->sli4_hba.asic_rev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "4210 Firmware write failed: "
+ "add_status_2 x%02x\n",
+ shdr_add_status_2);
+ break;
+ }
+ } else if (!shdr_status && !shdr_add_status) {
+ if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
+ shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
+ if (shdr_csf)
+ shdr_change_status =
+ LPFC_CHANGE_STATUS_PCI_RESET;
+ }
+
+ switch (shdr_change_status) {
+ case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3198 Firmware write complete: System "
+ "reboot required to instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_FW_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3199 Firmware write complete: "
+ "Firmware reset required to "
+ "instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3200 Firmware write complete: Port "
+ "Migration or PCI Reset required to "
+ "instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_PCI_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3201 Firmware write complete: PCI "
+ "Reset required to instantiate\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
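+ The decode above falls into two regimes: an incompatible image (add_status ==
+ LPFC_ADD_STATUS_INCOMPAT_OBJ, refined by add_status_2) and a successful write
+ (status fields clear, refined by change_status, with the csf bit promoting
+ FW_RESET/PORT_MIGRATION to PCI_RESET). Two illustrative walk-throughs with
+ hypothetical field values:
+
+    /* shdr_add_status   = LPFC_ADD_STATUS_INCOMPAT_OBJ
+     * shdr_add_status_2 = LPFC_ADD_STATUS_2_INCOMPAT_FLASH
+     *   -> logs 4199 "image incompatible with flash x%02x"
+     *
+     * shdr_status = shdr_add_status = 0
+     * shdr_change_status = LPFC_CHANGE_STATUS_FW_RESET, shdr_csf = 1
+     *   -> promoted to LPFC_CHANGE_STATUS_PCI_RESET,
+     *      logs 3201 "PCI Reset required to instantiate"
+     */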
+/**
* lpfc_wr_object - write an object to the firmware
* @phba: HBA structure that indicates port to create a queue on.
* @dmabuf_list: list of dmabufs to write to the port.
@@ -20046,7 +20167,8 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
struct lpfc_mbx_wr_object *wr_object;
LPFC_MBOXQ_t *mbox;
int rc = 0, i = 0;
- uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
+ uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
+ uint32_t shdr_change_status = 0, shdr_csf = 0;
uint32_t mbox_tmo;
struct lpfc_dmabuf *dmabuf;
uint32_t written = 0;
@@ -20100,58 +20222,36 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
&wr_object->header.cfg_shdr.response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
&wr_object->header.cfg_shdr.response);
+ shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
+ &wr_object->header.cfg_shdr.response);
if (check_change_status) {
shdr_change_status = bf_get(lpfc_wr_object_change_status,
&wr_object->u.response);
-
- if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
- shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
- shdr_csf = bf_get(lpfc_wr_object_csf,
- &wr_object->u.response);
- if (shdr_csf)
- shdr_change_status =
- LPFC_CHANGE_STATUS_PCI_RESET;
- }
-
- switch (shdr_change_status) {
- case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3198 Firmware write complete: System "
- "reboot required to instantiate\n");
- break;
- case (LPFC_CHANGE_STATUS_FW_RESET):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3199 Firmware write complete: Firmware"
- " reset required to instantiate\n");
- break;
- case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3200 Firmware write complete: Port "
- "Migration or PCI Reset required to "
- "instantiate\n");
- break;
- case (LPFC_CHANGE_STATUS_PCI_RESET):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3201 Firmware write complete: PCI "
- "Reset required to instantiate\n");
- break;
- default:
- break;
- }
+ shdr_csf = bf_get(lpfc_wr_object_csf,
+ &wr_object->u.response);
}
+
if (!phba->sli4_hba.intr_enable)
mempool_free(mbox, phba->mbox_mem_pool);
else if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
- if (shdr_status || shdr_add_status || rc) {
+ if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3025 Write Object mailbox failed with "
- "status x%x add_status x%x, mbx status x%x\n",
- shdr_status, shdr_add_status, rc);
+ "status x%x add_status x%x, add_status_2 x%x, "
+ "mbx status x%x\n",
+ shdr_status, shdr_add_status, shdr_add_status_2,
+ rc);
rc = -ENXIO;
*offset = shdr_add_status;
- } else
+ } else {
*offset += wr_object->u.response.actual_write_length;
+ }
+
+ if (rc || check_change_status)
+ lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
+ shdr_add_status_2, shdr_change_status,
+ shdr_csf);
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 26f19c95380f..f250b666ac57 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -978,6 +978,8 @@ struct lpfc_sli4_hba {
#define lpfc_conf_trunk_port3_nd_WORD conf_trunk
#define lpfc_conf_trunk_port3_nd_SHIFT 7
#define lpfc_conf_trunk_port3_nd_MASK 0x1
+ uint8_t flash_id;
+ uint8_t asic_rev;
};
enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 2d62fd2a9824..73a5b3bbdacd 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.10"
+#define LPFC_DRIVER_VERSION "14.0.0.0"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index ec10b2497310..e4298bf4a482 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1451,10 +1451,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
* pthru timeout to the os layer timeout value.
*/
if (scp->device->type == TYPE_TAPE) {
- if ((scp->request->timeout / HZ) > 0xFFFF)
+ if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
pthru->timeout = cpu_to_le16(0xFFFF);
else
- pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
+ pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
}
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 06399c026a8d..26d0cf9353dd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -402,7 +402,7 @@ megasas_get_msix_index(struct megasas_instance *instance,
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
} else if (instance->host->nr_hw_queues > 1) {
- u32 tag = blk_mq_unique_tag(scmd->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
instance->low_latency_index_start;
@@ -3023,7 +3023,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
io_request->DevHandle = cpu_to_le16(device_id);
io_request->LUN[1] = scmd->device->lun;
pRAID_Context->timeout_value =
- cpu_to_le16 (scmd->request->timeout / HZ);
+ cpu_to_le16(scsi_cmd_to_rq(scmd)->timeout / HZ);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -3086,7 +3086,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(scmd);
pd_index = MEGASAS_PD_INDEX(scmd);
- os_timeout_value = scmd->request->timeout / HZ;
+ os_timeout_value = scsi_cmd_to_rq(scmd)->timeout / HZ;
mr_device_priv_data = scmd->device->hostdata;
cmd->pd_interface = mr_device_priv_data->interface_type;
@@ -3381,7 +3381,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
return SCSI_MLQUEUE_HOST_BUSY;
}
- cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
+ cmd = megasas_get_cmd_fusion(instance, scsi_cmd_to_rq(scmd)->tag);
if (!cmd) {
atomic_dec(&instance->fw_outstanding);
@@ -3422,7 +3422,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
*/
if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
r1_cmd = megasas_get_cmd_fusion(instance,
- (scmd->request->tag + instance->max_fw_cmds));
+ scsi_cmd_to_rq(scmd)->tag + instance->max_fw_cmds);
megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 24ac7ddec749..bc1c32f599de 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -50,7 +50,7 @@ static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
u32 unique_tag;
u16 host_tag, hw_queue;
- unique_tag = blk_mq_unique_tag(scmd->request);
+ unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
if (hw_queue >= mrioc->num_op_reply_q)
@@ -2016,7 +2016,7 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
case SCSI_PROT_DIF_TYPE0:
eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
scsiio_req->cdb.eedp32.primary_reference_tag =
- cpu_to_be32(t10_pi_ref_tag(scmd->request));
+ cpu_to_be32(t10_pi_ref_tag(scsi_cmd_to_rq(scmd)));
break;
case SCSI_PROT_DIF_TYPE1:
case SCSI_PROT_DIF_TYPE2:
@@ -2024,7 +2024,7 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE |
MPI3_EEDPFLAGS_CHK_GUARD;
scsiio_req->cdb.eedp32.primary_reference_tag =
- cpu_to_be32(t10_pi_ref_tag(scmd->request));
+ cpu_to_be32(t10_pi_ref_tag(scsi_cmd_to_rq(scmd)));
break;
case SCSI_PROT_DIF_TYPE3:
eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD |
@@ -3451,7 +3451,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
u16 dev_handle;
u16 host_tag;
u32 scsiio_flags = 0;
- struct request *rq = scmd->request;
+ struct request *rq = scsi_cmd_to_rq(scmd);
int iprio_class;
sdev_priv_data = scmd->device->hostdata;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 19b1c0cf5f2a..2528c4e7b0e0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -116,6 +116,14 @@ MODULE_PARM_DESC(perf_mode,
"\t\tdefault - default perf_mode is 'balanced'"
);
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
+ "This parameter is effective only if host_tagset_enable=1.\n\t\t"
+ "When poll_queues are enabled, perf_mode is set to latency mode."
+ );
+
enum mpt3sas_perf_mode {
MPT_PERF_MODE_DEFAULT = -1,
MPT_PERF_MODE_BALANCED = 0,
@@ -709,6 +717,7 @@ _base_fault_reset_work(struct work_struct *work)
* and this call is safe since dead ioc will never return any
* command back from HW.
*/
+ mpt3sas_base_pause_mq_polling(ioc);
ioc->schedule_dead_ioc_flush_running_cmds(ioc);
/*
* Set remove_host flag early since kernel thread will
@@ -744,6 +753,7 @@ _base_fault_reset_work(struct work_struct *work)
spin_unlock_irqrestore(
&ioc->ioc_reset_in_progress_lock, flags);
mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
_base_clear_outstanding_commands(ioc);
}
@@ -1548,6 +1558,53 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
+ * when the driver is flushing out the IOs.
+ * @ioc: per adapter object
+ *
+ * Pause polling on the mq poll (io uring) queues when the driver is flushing
+ * out the IOs. Otherwise we may hit the race condition of completing the same
+ * IO from two paths.
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+ int iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
+
+ /*
+ * wait for current poll to complete.
+ */
+ for (qid = 0; qid < iopoll_q_count; qid++) {
+ while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))
+ udelay(500);
+ }
+}
+
+/**
+ * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+ int iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
+}
+
+/**
* mpt3sas_base_mask_interrupts - disable interrupts
* @ioc: per adapter object
*
@@ -1722,7 +1779,8 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
MPI2_RPHI_MSIX_INDEX_SHIFT),
&ioc->chip->ReplyPostHostIndex);
}
- if (!reply_q->irq_poll_scheduled) {
+ if (!reply_q->is_iouring_poll_q &&
+ !reply_q->irq_poll_scheduled) {
reply_q->irq_poll_scheduled = true;
irq_poll_sched(&reply_q->irqpoll);
}
@@ -1779,6 +1837,33 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
}
/**
+ * mpt3sas_blk_mq_poll - poll the blk mq poll queue
+ * @shost: Scsi_Host object
+ * @queue_num: hw ctx queue number
+ *
+ * Return number of entries that have been processed from the poll queue.
+ */
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ (struct MPT3SAS_ADAPTER *)shost->hostdata;
+ struct adapter_reply_queue *reply_q;
+ int num_entries = 0;
+ int qid = queue_num - ioc->iopoll_q_start_index;
+
+ if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
+ !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
+ return 0;
+
+ reply_q = ioc->io_uring_poll_queues[qid].reply_q;
+
+ num_entries = _base_process_reply_queue(reply_q);
+ atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
+
+ return num_entries;
+}
+
+/**
* _base_interrupt - MPT adapter (IOC) specific interrupt handler.
* @irq: irq number (not used)
* @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
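Taken together, mpt3sas_base_pause_mq_polling() and mpt3sas_blk_mq_poll() form a small pause/busy handshake: a poller may take the busy flag only while pause is clear, and the pauser sets pause first and then waits for busy to drain, so a poll queue is never processed concurrently from the io_uring poll path and a flush/reset path. A minimal standalone sketch of the same protocol (hypothetical code, not part of the driver):

    #include <linux/atomic.h>
    #include <linux/delay.h>

    static atomic_t pause_flag = ATOMIC_INIT(0);
    static atomic_t busy = ATOMIC_INIT(0);

    /* Poll side: bail out when paused, and admit at most one poller. */
    static int poll_once(void)
    {
    	if (atomic_read(&pause_flag) ||
    	    !atomic_add_unless(&busy, 1, 1))
    		return 0;
    	/* ... process completion queue entries ... */
    	atomic_dec(&busy);
    	return 1;
    }

    /* Flush side: stop new pollers, then wait out the in-flight one. */
    static void pause_polling(void)
    {
    	atomic_set(&pause_flag, 1);
    	while (atomic_read(&busy))
    		udelay(500);
    }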
@@ -1851,6 +1936,8 @@ _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
return;
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ if (reply_q->is_iouring_poll_q)
+ continue;
irq_poll_init(&reply_q->irqpoll,
ioc->hba_queue_depth/4, _base_irqpoll);
reply_q->irq_poll_scheduled = false;
@@ -1900,6 +1987,12 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
/* TMs are on msix_index == 0 */
if (reply_q->msix_index == 0)
continue;
+
+ if (reply_q->is_iouring_poll_q) {
+ _base_process_reply_queue(reply_q);
+ continue;
+ }
+
synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
if (reply_q->irq_poll_scheduled) {
/* Calling irq_poll_disable will wait for any pending
@@ -2998,6 +3091,11 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
+ if (reply_q->is_iouring_poll_q) {
+ kfree(reply_q);
+ continue;
+ }
+
if (ioc->smp_affinity_enable)
irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
reply_q->msix_index), NULL);
@@ -3019,7 +3117,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
struct pci_dev *pdev = ioc->pdev;
struct adapter_reply_queue *reply_q;
- int r;
+ int r, qid;
reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
if (!reply_q) {
@@ -3031,6 +3129,17 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
reply_q->msix_index = index;
atomic_set(&reply_q->busy, 0);
+
+ if (index >= ioc->iopoll_q_start_index) {
+ qid = index - ioc->iopoll_q_start_index;
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
+ ioc->driver_name, ioc->id, qid);
+ reply_q->is_iouring_poll_q = 1;
+ ioc->io_uring_poll_queues[qid].reply_q = reply_q;
+ goto out;
+ }
+
+
if (ioc->msix_enable)
snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
ioc->driver_name, ioc->id, index);
@@ -3045,7 +3154,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
kfree(reply_q);
return -EBUSY;
}
-
+out:
INIT_LIST_HEAD(&reply_q->list);
list_add_tail(&reply_q->list, &ioc->reply_queue_list);
return 0;
@@ -3066,6 +3175,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
unsigned int cpu, nr_cpus, nr_msix, index = 0;
struct adapter_reply_queue *reply_q;
int local_numa_node;
+ int iopoll_q_count = ioc->reply_queue_count -
+ ioc->iopoll_q_start_index;
if (!_base_is_controller_msix_enabled(ioc))
return;
@@ -3099,7 +3210,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
const cpumask_t *mask;
- if (reply_q->msix_index < ioc->high_iops_queues)
+ if (reply_q->msix_index < ioc->high_iops_queues ||
+ reply_q->msix_index >= ioc->iopoll_q_start_index)
continue;
mask = pci_irq_get_affinity(ioc->pdev,
@@ -3121,13 +3233,14 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
fall_back:
cpu = cpumask_first(cpu_online_mask);
- nr_msix -= ioc->high_iops_queues;
+ nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
index = 0;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
unsigned int i, group = nr_cpus / nr_msix;
- if (reply_q->msix_index < ioc->high_iops_queues)
+ if (reply_q->msix_index < ioc->high_iops_queues ||
+ reply_q->msix_index >= ioc->iopoll_q_start_index)
continue;
if (cpu >= nr_cpus)
@@ -3164,8 +3277,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
{
u16 lnksta, speed;
+ /*
+ * Disable high iops queues if io uring poll queues are enabled.
+ */
if (perf_mode == MPT_PERF_MODE_IOPS ||
- perf_mode == MPT_PERF_MODE_LATENCY) {
+ perf_mode == MPT_PERF_MODE_LATENCY ||
+ ioc->io_uring_poll_queues) {
ioc->high_iops_queues = 0;
return;
}
@@ -3202,6 +3319,7 @@ mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
return;
pci_free_irq_vectors(ioc->pdev);
ioc->msix_enable = 0;
+ kfree(ioc->io_uring_poll_queues);
}
/**
@@ -3215,18 +3333,24 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
int i, irq_flags = PCI_IRQ_MSIX;
struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
struct irq_affinity *descp = &desc;
+ /*
+ * Don't allocate msix vectors for poll_queues.
+ * msix_vectors always stays within the range of FW-supported reply queues.
+ */
+ int nr_msix_vectors = ioc->iopoll_q_start_index;
+
if (ioc->smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
else
descp = NULL;
- ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
- ioc->reply_queue_count);
+ ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues,
+ ioc->reply_queue_count, nr_msix_vectors);
i = pci_alloc_irq_vectors_affinity(ioc->pdev,
ioc->high_iops_queues,
- ioc->reply_queue_count, irq_flags, descp);
+ nr_msix_vectors, irq_flags, descp);
return i;
}
@@ -3242,6 +3366,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
int r;
int i, local_max_msix_vectors;
u8 try_msix = 0;
+ int iopoll_q_count = 0;
ioc->msix_load_balance = false;
@@ -3257,22 +3382,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
ioc->cpu_count, max_msix_vectors);
- if (ioc->is_aero_ioc)
- _base_check_and_enable_high_iops_queues(ioc,
- ioc->msix_vector_count);
+
ioc->reply_queue_count =
- min_t(int, ioc->cpu_count + ioc->high_iops_queues,
- ioc->msix_vector_count);
+ min_t(int, ioc->cpu_count, ioc->msix_vector_count);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
local_max_msix_vectors = (reset_devices) ? 1 : 8;
else
local_max_msix_vectors = max_msix_vectors;
- if (local_max_msix_vectors > 0)
- ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
- ioc->reply_queue_count);
- else if (local_max_msix_vectors == 0)
+ if (local_max_msix_vectors == 0)
goto try_ioapic;
/*
@@ -3293,14 +3412,77 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
if (ioc->msix_load_balance)
ioc->smp_affinity_enable = 0;
+ if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
+ ioc->shost->host_tagset = 0;
+
+ /*
+ * Enable io uring poll queues only if host_tagset is enabled.
+ */
+ if (ioc->shost->host_tagset)
+ iopoll_q_count = poll_queues;
+
+ if (iopoll_q_count) {
+ ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
+ sizeof(struct io_uring_poll_queue), GFP_KERNEL);
+ if (!ioc->io_uring_poll_queues)
+ iopoll_q_count = 0;
+ }
+
+ if (ioc->is_aero_ioc)
+ _base_check_and_enable_high_iops_queues(ioc,
+ ioc->msix_vector_count);
+
+ /*
+ * Add the high iops queue count to the reply queue count if high iops
+ * queues are enabled.
+ */
+ ioc->reply_queue_count = min_t(int,
+ ioc->reply_queue_count + ioc->high_iops_queues,
+ ioc->msix_vector_count);
+
+ /*
+ * Adjust the reply queue count in case the reply queue count
+ * exceeds the user-provided MSI-X vector count.
+ */
+ if (local_max_msix_vectors > 0)
+ ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
+ ioc->reply_queue_count);
+ /*
+ * Add the io uring poll queue count to the reply queue count
+ * if io uring is enabled in the driver.
+ */
+ if (iopoll_q_count) {
+ if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
+ iopoll_q_count = 0;
+ ioc->reply_queue_count = min_t(int,
+ ioc->reply_queue_count + iopoll_q_count,
+ ioc->msix_vector_count);
+ }
+
+ /*
+ * Starting index of io uring poll queues in reply queue list.
+ */
+ ioc->iopoll_q_start_index =
+ ioc->reply_queue_count - iopoll_q_count;
+
r = _base_alloc_irq_vectors(ioc);
if (r < 0) {
ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
goto try_ioapic;
}
+ /*
+ * Adjust the reply queue count if the number of allocated
+ * MSI-X vectors is less than the requested number
+ * of MSI-X vectors.
+ */
+ if (r < ioc->iopoll_q_start_index) {
+ ioc->reply_queue_count = r + iopoll_q_count;
+ ioc->iopoll_q_start_index =
+ ioc->reply_queue_count - iopoll_q_count;
+ }
+
ioc->msix_enable = 1;
- ioc->reply_queue_count = r;
for (i = 0; i < ioc->reply_queue_count; i++) {
r = _base_request_irq(ioc, i);
if (r) {
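To make the counting above concrete, take a hypothetical gen3.5 HBA with 32 online CPUs, msix_vector_count = 128, poll_queues = 4, host_tagset enabled, rdpq enabled and no max_msix_vectors cap; high_iops_queues is forced to 0 because io_uring_poll_queues was allocated:

    reply_queue_count    = min(32 CPUs, 128)  = 32  /* IRQ-driven queues */
    reply_queue_count   += high_iops (0)      = 32
    reply_queue_count    = min(32 + 4, 128)   = 36  /* 32 >= 4 + MPT3_MIN_IRQS */
    iopoll_q_start_index = 36 - 4             = 32  /* queues 32..35 are polled */
    nr_msix_vectors      = 32                       /* no vectors for poll queues */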
@@ -3320,6 +3502,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->high_iops_queues = 0;
ioc_info(ioc, "High IOPs queues : disabled\n");
ioc->reply_queue_count = 1;
+ ioc->iopoll_q_start_index = ioc->reply_queue_count - 0;
r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
if (r < 0) {
dfailprintk(ioc,
@@ -3416,6 +3599,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
u64 pio_chip = 0;
phys_addr_t chip_phys = 0;
struct adapter_reply_queue *reply_q;
+ int iopoll_q_count = 0;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -3489,6 +3673,12 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_fail;
+ iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
+ for (i = 0; i < iopoll_q_count; i++) {
+ atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
+ atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
+ }
+
if (!ioc->is_driver_loading)
_base_init_irqpolls(ioc);
/* Use the Combined reply queue feature only for SAS3 C0 & higher
@@ -3530,11 +3720,18 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* 4)));
}
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
+ pr_info("%s: enabled: index: %d\n",
+ reply_q->name, reply_q->msix_index);
+ continue;
+ }
+
pr_info("%s: %s enabled: IRQ %d\n",
reply_q->name,
ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ }
ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
&chip_phys, ioc->chip, memap_sz);
@@ -3651,7 +3848,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
&ioc->total_io_cnt), ioc->reply_queue_count) : 0;
if (scmd && ioc->shost->nr_hw_queues > 1) {
- u32 tag = blk_mq_unique_tag(scmd->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
return blk_mq_unique_tag_to_hwq(tag) +
ioc->high_iops_queues;
@@ -3735,7 +3932,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
u16 smid;
u32 tag, unique_tag;
- unique_tag = blk_mq_unique_tag(scmd->request);
+ unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
tag = blk_mq_unique_tag_to_tag(unique_tag);
/*
@@ -5169,6 +5366,73 @@ _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _base_assign_fw_reported_qd - get the FW-reported QD for SAS/SATA/NVMe
+ * devices; on failure fall back to the default QD values.
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+ int sz;
+ int rc = 0;
+
+ ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
+ ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
+ if (!ioc->is_gen35_ioc)
+ goto out;
+ /* sas iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return rc;
+ }
+ rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz);
+ if (rc) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->max_wideport_qd =
+ (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
+ le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
+ MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_narrowport_qd =
+ (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
+ le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
+ MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
+ sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
+ /* pcie iounit page 1 */
+ rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
+ &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
+ if (rc) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
+ (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
+ MPT3SAS_NVME_QUEUE_DEPTH;
+out:
+ dinitprintk(ioc, pr_err(
+ "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
+ ioc->max_wideport_qd, ioc->max_narrowport_qd,
+ ioc->max_sata_qd, ioc->max_nvme_qd));
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
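+ Later hunks in this series consume these limits when devices are configured;
+ the choice reduces to a small dispatch on device class, roughly (condensed
+ hypothetical helper, not driver code):
+
+    /* port_type is MaxPortConnections from SAS Device Page 0;
+     * more than one connection means a wide port. */
+    static u16 fw_reported_qd(struct MPT3SAS_ADAPTER *ioc,
+    			  bool is_nvme, bool is_ssp, u8 port_type)
+    {
+    	if (is_nvme)
+    		return ioc->max_nvme_qd;
+    	if (is_ssp)
+    		return port_type > 1 ? ioc->max_wideport_qd :
+    				       ioc->max_narrowport_qd;
+    	return ioc->max_sata_qd;
+    }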
+/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
@@ -5237,6 +5501,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
ioc_warn(ioc,
"TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
}
+ rc = _base_assign_fw_reported_qd(ioc);
+ if (rc)
+ return rc;
rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
if (rc)
return rc;
@@ -8471,6 +8738,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
_base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
r = mpt3sas_base_make_ioc_ready(ioc, type);
if (r)
goto out;
@@ -8512,6 +8780,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
ioc->ioc_reset_count++;
mutex_unlock(&ioc->reset_in_progress_mutex);
+ mpt3sas_base_resume_mq_polling(ioc);
out_unlocked:
if ((r == 0) && is_trigger) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 0c6c3df0038d..f87c0911f66a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,9 +77,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "37.101.00.00"
-#define MPT3SAS_MAJOR_VERSION 37
-#define MPT3SAS_MINOR_VERSION 101
+#define MPT3SAS_DRIVER_VERSION "39.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 39
+#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -354,6 +354,7 @@ struct mpt3sas_nvme_cmd {
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
+#define MPT3_MIN_IRQS 1
/* OEM Identifiers */
#define MFG10_OEM_ID_INVALID (0x00000000)
@@ -575,6 +576,7 @@ struct _sas_device {
u8 is_chassis_slot_valid;
u8 connector_name[5];
struct kref refcount;
+ u8 port_type;
struct hba_port *port;
struct sas_rphy *rphy;
};
@@ -936,6 +938,8 @@ struct _event_ack_list {
* @os_irq: irq number
* @irqpoll: irq_poll object
* @irq_poll_scheduled: Tells whether irq poll is scheduled or not
+ * @is_iouring_poll_q: Tells whether the reply queue is assigned
+ * to an io uring poll queue or not
* @list: this list
*/
struct adapter_reply_queue {
@@ -949,9 +953,22 @@ struct adapter_reply_queue {
struct irq_poll irqpoll;
bool irq_poll_scheduled;
bool irq_line_enable;
+ bool is_iouring_poll_q;
struct list_head list;
};
+/**
+ * struct io_uring_poll_queue - the io uring poll queue structure
+ * @busy: Tells whether io uring poll queue is busy or not
+ * @pause: Tells whether IOs are paused on io uring poll queue or not
+ * @reply_q: reply queue mapped for io uring poll queue
+ */
+struct io_uring_poll_queue {
+ atomic_t busy;
+ atomic_t pause;
+ struct adapter_reply_queue *reply_q;
+};
+
typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
/* SAS3.0 support */
@@ -1176,6 +1193,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
* @thresh_hold: Max number of reply descriptors processed
* before updating Host Index
+ * @iopoll_q_start_index: starting index of io uring poll queues
+ * in reply queue list
* @drv_internal_flags: Bit map internal to driver
* @drv_support_bitmap: driver's supported feature bit map
* @use_32bit_dma: Flag to use 32 bit consistent dma mask
@@ -1372,11 +1391,13 @@ struct MPT3SAS_ADAPTER {
bool msix_load_balance;
u16 thresh_hold;
u8 high_iops_queues;
+ u8 iopoll_q_start_index;
u32 drv_internal_flags;
u32 drv_support_bitmap;
u32 dma_mask;
bool enable_sdev_max_qd;
bool use_32bit_dma;
+ struct io_uring_poll_queue *io_uring_poll_queues;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
@@ -1423,6 +1444,10 @@ struct MPT3SAS_ADAPTER {
u8 tm_custom_handling;
u8 nvme_abort_timeout;
u16 max_shutdown_latency;
+ u16 max_wideport_qd;
+ u16 max_narrowport_qd;
+ u16 max_nvme_qd;
+ u8 max_sata_qd;
/* static config pages */
struct mpt3sas_facts facts;
@@ -1730,10 +1755,12 @@ do { ioc_err(ioc, "In func: %s\n", __func__); \
status, mpi_request, sz); } while (0)
int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
-int
-mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
+int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
@@ -1829,6 +1856,9 @@ int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
u32 form, u32 handle);
+int mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+ u16 sz);
int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
u16 sz);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 83a5c2172ad4..0563078227de 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -1169,6 +1169,43 @@ out:
}
/**
+ * mpt3sas_config_get_pcie_iounit_pg1 - obtain pcie iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT;
+ mpi_request.Header.PageVersion = MPI26_PCIEIOUNITPAGE1_PAGEVERSION;
+ mpi_request.Header.PageNumber = 1;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+out:
+ return r;
+}
+
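+ Like the driver's other config-page helpers, this reader is a two-action
+ sequence (outline of the pattern, as implemented above):
+
+    /* 1. Action = MPI2_CONFIG_ACTION_PAGE_HEADER       -> validate the page,
+     *    prime the request header
+     * 2. Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT -> copy up to sz
+     *    bytes into config_page
+     * A failure in step 1 short-circuits to "out" without reading.
+     */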
+/**
* mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index b66140e4c370..770b241d7bb2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3820,9 +3820,10 @@ enable_sdev_max_qd_store(struct device *cdev,
}
} else if (sas_target_priv_data->flags &
MPT_TARGET_FLAGS_PCIE_DEVICE)
- qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+ qdepth = ioc->max_nvme_qd;
else
- qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
mpt3sas_scsih_change_queue_depth(sdev, qdepth);
}
@@ -3919,6 +3920,24 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(sas_device_handle);
/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute; only works with SATA devices.
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
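+ Once registered, the attribute appears next to the existing
+ sas_ncq_prio_enable node in the SCSI device's sysfs directory. A minimal
+ userspace check might look like this (the /sys path is illustrative;
+ substitute the actual disk name):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+    	char buf[8];
+    	/* hypothetical path for a disk enumerated as sdb */
+    	FILE *f = fopen("/sys/block/sdb/device/sas_ncq_prio_supported", "r");
+
+    	if (!f)
+    		return 1;
+    	if (fgets(buf, sizeof(buf), f))
+    		printf("NCQ priority supported: %s", buf);
+    	fclose(f);
+    	return 0;
+    }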
+/**
* sas_ncq_prio_enable_show - send prioritized io commands to device
* @dev: pointer to embedded device
* @attr: ?
@@ -3960,6 +3979,7 @@ static DEVICE_ATTR_RW(sas_ncq_prio_enable);
struct device_attribute *mpt3sas_dev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_sas_device_handle,
+ &dev_attr_sas_ncq_prio_supported,
&dev_attr_sas_ncq_prio_enable,
NULL,
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8e64a6f14542..cbc7b3d9d913 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1803,7 +1803,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* limit max device queue for SATA to 32 if enable_sdev_max_qd
* is disabled.
*/
- if (ioc->enable_sdev_max_qd)
+ if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
goto not_sata;
sas_device_priv_data = sdev->hostdata;
@@ -2657,7 +2657,7 @@ scsih_slave_configure(struct scsi_device *sdev)
return 1;
}
- qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+ qdepth = ioc->max_nvme_qd;
ds = "NVMe";
sdev_printk(KERN_INFO, sdev,
"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
@@ -2709,7 +2709,8 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device->volume_handle = volume_handle;
sas_device->volume_wwid = volume_wwid;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
- qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ qdepth = (sas_device->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
ssp_target = 1;
if (sas_device->device_info &
MPI2_SAS_DEVICE_INFO_SEP) {
@@ -2721,7 +2722,7 @@ scsih_slave_configure(struct scsi_device *sdev)
} else
ds = "SSP";
} else {
- qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ qdepth = ioc->max_sata_qd;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
ds = "STP";
else if (sas_device->device_info &
@@ -3304,7 +3305,7 @@ scsih_abort(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
"scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
- (scmd->request->timeout / HZ) * 1000);
+ (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
@@ -5074,7 +5075,7 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
mpi_request->CDB.EEDP32.PrimaryReferenceTag =
- cpu_to_be32(t10_pi_ref_tag(scmd->request));
+ cpu_to_be32(t10_pi_ref_tag(scsi_cmd_to_rq(scmd)));
break;
case SCSI_PROT_DIF_TYPE3:
@@ -5141,7 +5142,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _raid_device *raid_device;
- struct request *rq = scmd->request;
+ struct request *rq = scsi_cmd_to_rq(scmd);
int class;
Mpi25SCSIIORequest_t *mpi_request;
struct _pcie_device *pcie_device = NULL;
@@ -7371,6 +7372,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/* get device name */
sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+ sas_device->port_type = sas_device_pg0.MaxPortConnections;
+ ioc_info(ioc,
+ "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
+ handle, sas_device->sas_address, sas_device->port_type);
if (ioc->wait_for_discovery_to_complete)
_scsih_sas_device_init_add(ioc, sas_device);
@@ -9604,6 +9609,42 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_update_device_qdepth - Update QD during Reset.
+ * @ioc: per adapter object
+ *
+ */
+static void
+_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct scsi_device *sdev;
+ u16 qdepth;
+
+ ioc_info(ioc, "Update devices with firmware reported queue depth\n");
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target) {
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ sas_device = sas_device_priv_data->sas_target->sas_dev;
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
+ qdepth = ioc->max_nvme_qd;
+ else if (sas_device &&
+ sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ qdepth = (sas_device->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
+ else if (sas_device &&
+ sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ qdepth = ioc->max_sata_qd;
+ else
+ continue;
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+ }
+ }
+}
+
+/**
* _scsih_mark_responding_sas_device - mark a sas_devices as responding
* @ioc: per adapter object
* @sas_device_pg0: SAS Device page 0
@@ -10654,6 +10695,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
_scsih_remove_unresponding_devices(ioc);
_scsih_del_dirty_vphy(ioc);
_scsih_del_dirty_port_entries(ioc);
+ if (ioc->is_gen35_ioc)
+ _scsih_update_device_qdepth(ioc);
_scsih_scan_for_devices_after_reset(ioc);
/*
* If diag reset has occurred during the driver load
@@ -11178,8 +11221,10 @@ static void scsih_remove(struct pci_dev *pdev)
ioc->remove_host = 1;
- if (!pci_device_is_present(pdev))
+ if (!pci_device_is_present(pdev)) {
+ mpt3sas_base_pause_mq_polling(ioc);
_scsih_flush_running_cmds(ioc);
+ }
_scsih_fw_event_cleanup_queue(ioc);
@@ -11274,8 +11319,10 @@ scsih_shutdown(struct pci_dev *pdev)
ioc->remove_host = 1;
- if (!pci_device_is_present(pdev))
+ if (!pci_device_is_present(pdev)) {
+ mpt3sas_base_pause_mq_polling(ioc);
_scsih_flush_running_cmds(ioc);
+ }
_scsih_fw_event_cleanup_queue(ioc);
@@ -11785,12 +11832,41 @@ static int scsih_map_queues(struct Scsi_Host *shost)
{
struct MPT3SAS_ADAPTER *ioc =
(struct MPT3SAS_ADAPTER *)shost->hostdata;
+ struct blk_mq_queue_map *map;
+ int i, qoff, offset;
+ int nr_msix_vectors = ioc->iopoll_q_start_index;
+ int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
- if (ioc->shost->nr_hw_queues == 1)
+ if (shost->nr_hw_queues == 1)
return 0;
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ioc->pdev, ioc->high_iops_queues);
+ for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+ map = &shost->tag_set.map[i];
+ map->nr_queues = 0;
+ offset = 0;
+ if (i == HCTX_TYPE_DEFAULT) {
+ map->nr_queues =
+ nr_msix_vectors - ioc->high_iops_queues;
+ offset = ioc->high_iops_queues;
+ } else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = iopoll_q_count;
+
+ if (!map->nr_queues)
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+
+ /*
+ * The poll queues don't have an IRQ (and hence no IRQ
+ * affinity), so use the regular blk-mq cpu mapping
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_pci_map_queues(map, ioc->pdev, offset);
+ else
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ }
+ return 0;
}
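Continuing the hypothetical sizing from the MSI-X discussion above (32 IRQ-driven reply queues, high_iops_queues = 0, 4 poll queues, so nr_hw_queues = 36 and nr_maps = 3), the maps built here come out as:

    HCTX_TYPE_DEFAULT: nr_queues = 32, queue_offset = 0   (blk_mq_pci_map_queues)
    HCTX_TYPE_READ:    nr_queues = 0                      (unused)
    HCTX_TYPE_POLL:    nr_queues = 4,  queue_offset = 32  (blk_mq_map_queues)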
/* shost template for SAS 2.0 HBA devices */
@@ -11861,6 +11937,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.track_queue_depth = 1,
.cmd_size = sizeof(struct scsiio_tracker),
.map_queues = scsih_map_queues,
+ .mq_poll = mpt3sas_blk_mq_poll,
};
/* raid transport support for SAS 3.0 HBA devices */
@@ -11957,6 +12034,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *shost = NULL;
int rv;
u16 hba_mpi_version;
+ int iopoll_q_count = 0;
/* Determine in which MPI version class this pci device belongs */
hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
@@ -12204,6 +12282,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_thread_fail;
}
+ shost->host_tagset = 0;
+
+ if (ioc->is_gen35_ioc && host_tagset_enable)
+ shost->host_tagset = 1;
+
ioc->is_driver_loading = 1;
if ((mpt3sas_base_attach(ioc))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -12226,16 +12309,17 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
} else
ioc->hide_drives = 0;
- shost->host_tagset = 0;
shost->nr_hw_queues = 1;
- if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
- host_tagset_enable && ioc->smp_affinity_enable) {
-
- shost->host_tagset = 1;
+ if (shost->host_tagset) {
shost->nr_hw_queues =
ioc->reply_queue_count - ioc->high_iops_queues;
+ iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+
+ shost->nr_maps = iopoll_q_count ? 3 : 1;
+
dev_info(&ioc->pdev->dev,
"Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
shost->can_queue, shost->nr_hw_queues);
@@ -12359,6 +12443,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* Permanent error, prepare for device removal */
ioc->pci_error_recovery = 1;
mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
_scsih_flush_running_cmds(ioc);
return PCI_ERS_RESULT_DISCONNECT;
}
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 6bb03d7a254d..4d251bf630a3 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -702,7 +702,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
- scmd->request->tag, scmd->cmnd[0], scmd->retries);
+ scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries);
return mhba->instancet->reset_host(mhba);
}
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 542ed88ef90d..a4a88323e020 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1263,6 +1263,7 @@ static int myrb_host_reset(struct scsi_cmnd *scmd)
static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
{
+ struct request *rq = scsi_cmd_to_rq(scmd);
struct myrb_hba *cb = shost_priv(shost);
struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1286,7 +1287,7 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
}
mbox->type3.opcode = MYRB_CMD_DCDB;
- mbox->type3.id = scmd->request->tag + 3;
+ mbox->type3.id = rq->tag + 3;
mbox->type3.addr = dcdb_addr;
dcdb->channel = sdev->channel;
dcdb->target = sdev->id;
@@ -1305,11 +1306,11 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
break;
}
dcdb->early_status = false;
- if (scmd->request->timeout <= 10)
+ if (rq->timeout <= 10)
dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
- else if (scmd->request->timeout <= 60)
+ else if (rq->timeout <= 60)
dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
- else if (scmd->request->timeout <= 600)
+ else if (rq->timeout <= 600)
dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
else
dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
@@ -1550,7 +1551,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
}
myrb_reset_cmd(cmd_blk);
- mbox->type5.id = scmd->request->tag + 3;
+ mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
if (scmd->sc_data_direction == DMA_NONE)
goto submit;
nsge = scsi_dma_map(scmd);
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 26326af23dbc..07f274afd7e5 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1582,6 +1582,7 @@ static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
static int myrs_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
{
+ struct request *rq = scsi_cmd_to_rq(scmd);
struct myrs_hba *cs = shost_priv(shost);
struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1628,7 +1629,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
return SCSI_MLQUEUE_HOST_BUSY;
cmd_blk->sense_addr = sense_addr;
- timeout = scmd->request->timeout;
+ timeout = rq->timeout;
if (scmd->cmd_len <= 10) {
if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
struct myrs_ldev_info *ldev_info = sdev->hostdata;
@@ -1644,10 +1645,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
mbox->SCSI_10.pdev.target = sdev->id;
mbox->SCSI_10.pdev.channel = sdev->channel;
}
- mbox->SCSI_10.id = scmd->request->tag + 3;
+ mbox->SCSI_10.id = rq->tag + 3;
mbox->SCSI_10.control.dma_ctrl_to_host =
(scmd->sc_data_direction == DMA_FROM_DEVICE);
- if (scmd->request->cmd_flags & REQ_FUA)
+ if (rq->cmd_flags & REQ_FUA)
mbox->SCSI_10.control.fua = true;
mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
@@ -1690,10 +1691,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
mbox->SCSI_255.pdev.target = sdev->id;
mbox->SCSI_255.pdev.channel = sdev->channel;
}
- mbox->SCSI_255.id = scmd->request->tag + 3;
+ mbox->SCSI_255.id = rq->tag + 3;
mbox->SCSI_255.control.dma_ctrl_to_host =
(scmd->sc_data_direction == DMA_FROM_DEVICE);
- if (scmd->request->cmd_flags & REQ_FUA)
+ if (rq->cmd_flags & REQ_FUA)
mbox->SCSI_255.control.fua = true;
mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c76e9f05d042..09958f78b70f 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4164,8 +4164,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
**
**----------------------------------------------------
*/
- if (np->settle_time && cmd->request->timeout >= HZ) {
- u_long tlimit = jiffies + cmd->request->timeout - HZ;
+ if (np->settle_time && scsi_cmd_to_rq(cmd)->timeout >= HZ) {
+ u_long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout - HZ;
if (time_after(np->settle_time, tlimit))
np->settle_time = tlimit;
}
diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c
index e42acf314d06..33df6a9ba9b5 100644
--- a/drivers/scsi/pcmcia/fdomain_cs.c
+++ b/drivers/scsi/pcmcia/fdomain_cs.c
@@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link)
goto fail_disable;
if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
- "fdomain_cs"))
+ "fdomain_cs")) {
+ ret = -EBUSY;
goto fail_disable;
+ }
sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
if (!sh) {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 17c0f26e683a..63690508313b 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1323,7 +1323,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
void *pMessage;
unsigned long flags;
int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
- int rv = -1;
+ int rv;
WARN_ON(q_index >= PM8001_MAX_INB_NUM);
spin_lock_irqsave(&circularQ->iq_lock, flags);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 6b5b6a75ac88..3404782988d5 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1162,13 +1162,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
return;
}
- if (!sc_cmd->request) {
- QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
- "sc_cmd=%p.\n", sc_cmd);
- return;
- }
-
- if (!sc_cmd->request->q) {
+ if (!scsi_cmd_to_rq(sc_cmd)->q) {
QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
"is not valid, sc_cmd=%p.\n", sc_cmd);
return;
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 71333d3c5c86..ac99e980bb31 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -609,14 +609,7 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
goto error;
}
- if (!sc_cmd->request) {
- QEDI_WARN(&qedi->dbg_ctx,
- "sc_cmd->request is NULL, sc_cmd=%p.\n",
- sc_cmd);
- goto error;
- }
-
- if (!sc_cmd->request->q) {
+ if (!scsi_cmd_to_rq(sc_cmd)->q) {
QEDI_WARN(&qedi->dbg_ctx,
"request->q is NULL so request is not valid, sc_cmd=%p.\n",
sc_cmd);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 928da90b79be..9f9b4900c3ab 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -490,7 +490,6 @@ __setup("qla1280=", qla1280_setup);
#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
#define CMD_RESULT(Cmnd) Cmnd->result
#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
-#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
#define CMD_HOST(Cmnd) Cmnd->device->host
#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
@@ -2827,7 +2826,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
@@ -3082,7 +3081,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
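
Note: same accessor conversion as in the ncr53c8xx hunk; since block-layer timeouts are kept in jiffies and the ISP packet field wants seconds, the division by HZ does the unit change. A trivial sanity check of the arithmetic, assuming HZ=250 (the real value is a build-time option):

#include <stdio.h>

#define HZ 250	/* assumed; the kernel's HZ varies by configuration */

int main(void)
{
	unsigned int timeout = 30 * HZ;	/* a 30 s request timeout, in jiffies */

	printf("pkt->timeout = %u\n", timeout / HZ);	/* -> 30 */
	return 0;
}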
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 17d5bc1cc56b..cbc1303e761e 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o \
+ qla_edif.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3aa9869f6fae..22191e9a04a0 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2435,6 +2435,7 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
qla2x00_port_speed_store);
static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
+static DEVICE_ATTR_RO(edif_doorbell);
struct device_attribute *qla2x00_host_attrs[] = {
@@ -2480,6 +2481,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_port_no,
&dev_attr_fw_attr,
&dev_attr_dport_diagnostics,
+ &dev_attr_edif_doorbell,
NULL, /* reserve for qlini_mode */
NULL, /* reserve for ql2xiniexchg */
NULL, /* reserve for ql2xexchoffld */
@@ -3107,6 +3109,9 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
qla2x00_wait_for_sess_deletion(vha);
qla_nvme_delete(vha);
+ qla_enode_stop(vha);
+ qla_edb_stop(vha);
+
vha->flags.delete_progress = 1;
qlt_remove_target(ha, vha);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index d42b2ad84049..4b5d28d89d69 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -25,6 +25,10 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ ql_dbg(ql_dbg_user, sp->vha, 0x7009,
+ "%s: sp hdl %x, result=%x bsg ptr %p\n",
+ __func__, sp->handle, res, bsg_job);
+
sp->free(sp);
bsg_reply->result = res;
@@ -53,11 +57,19 @@ void qla2x00_bsg_sp_free(srb_t *sp)
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
} else {
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (sp->remap.remapped) {
+ dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
+ sp->remap.rsp.dma);
+ dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
+ sp->remap.req.dma);
+ } else {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ }
}
if (sp->type == SRB_CT_CMD ||
@@ -266,6 +278,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
int req_sg_cnt, rsp_sg_cnt;
int rval = (DID_ERROR << 16);
uint16_t nextlid = 0;
+ uint32_t els_cmd = 0;
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
rport = fc_bsg_to_rport(bsg_job);
@@ -279,6 +292,9 @@ qla2x00_process_els(struct bsg_job *bsg_job)
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_HST_ELS_NOLOGIN";
+ els_cmd = bsg_request->rqst_data.h_els.command_code;
+ if (els_cmd == ELS_AUTH_ELS)
+ return qla_edif_process_els(vha, bsg_job);
}
if (!vha->flags.online) {
@@ -2768,10 +2784,13 @@ qla2x00_manage_host_port(struct bsg_job *bsg_job)
}
static int
-qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
+qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
{
struct fc_bsg_request *bsg_request = bsg_job->request;
+ ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
+ __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
+
switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
case QL_VND_LOOPBACK:
return qla2x00_process_loopback(bsg_job);
@@ -2840,6 +2859,9 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
case QL_VND_DPORT_DIAGNOSTICS:
return qla2x00_do_dport_diagnostics(bsg_job);
+ case QL_VND_EDIF_MGMT:
+ return qla_edif_app_mgmt(bsg_job);
+
case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
return qla2x00_get_flash_image_status(bsg_job);
@@ -2897,12 +2919,19 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
bsg_request->msgcode);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
return -EBUSY;
}
+ if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ return -EIO;
+ }
+
skip_chip_chk:
- ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
+ ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
+ "Entered %s msgcode=0x%x. bsg ptr %px\n",
+ __func__, bsg_request->msgcode, bsg_job);
switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
@@ -2913,7 +2942,7 @@ skip_chip_chk:
ret = qla2x00_process_ct(bsg_job);
break;
case FC_BSG_HST_VENDOR:
- ret = qla2x00_process_vendor_specific(bsg_job);
+ ret = qla2x00_process_vendor_specific(vha, bsg_job);
break;
case FC_BSG_HST_ADD_RPORT:
case FC_BSG_HST_DEL_RPORT:
@@ -2922,6 +2951,10 @@ skip_chip_chk:
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
break;
}
+
+ ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
+ "%s done with return %x\n", __func__, ret);
+
return ret;
}
@@ -2936,6 +2969,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
unsigned long flags;
struct req_que *req;
+ ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
+ __func__, bsg_job);
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
@@ -2945,27 +2980,26 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
- if (sp) {
- if (((sp->type == SRB_CT_CMD) ||
- (sp->type == SRB_ELS_CMD_HST) ||
- (sp->type == SRB_FXIOCB_BCMD))
- && (sp->u.bsg_job == bsg_job)) {
- req->outstanding_cmds[cnt] = NULL;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- ql_log(ql_log_warn, vha, 0x7089,
- "mbx abort_command "
- "failed.\n");
- bsg_reply->result = -EIO;
- } else {
- ql_dbg(ql_dbg_user, vha, 0x708a,
- "mbx abort_command "
- "success.\n");
- bsg_reply->result = 0;
- }
- spin_lock_irqsave(&ha->hardware_lock, flags);
- goto done;
+ if (sp &&
+ (sp->type == SRB_CT_CMD ||
+ sp->type == SRB_ELS_CMD_HST ||
+ sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
+ sp->type == SRB_FXIOCB_BCMD) &&
+ sp->u.bsg_job == bsg_job) {
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ ql_log(ql_log_warn, vha, 0x7089,
+ "mbx abort_command failed.\n");
+ bsg_reply->result = -EIO;
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x708a,
+ "mbx abort_command success.\n");
+ bsg_reply->result = 0;
}
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ goto done;
+
}
}
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 0274e99e4a12..dd793cf8bc1e 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -31,6 +31,7 @@
#define QL_VND_DPORT_DIAGNOSTICS 0x19
#define QL_VND_GET_PRIV_STATS_EX 0x1A
#define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E
+#define QL_VND_EDIF_MGMT 0X1F
#define QL_VND_MANAGE_HOST_STATS 0x23
#define QL_VND_GET_HOST_STATS 0x24
#define QL_VND_GET_TGT_STATS 0x25
@@ -294,4 +295,6 @@ struct qla_active_regions {
uint8_t reserved[32];
} __packed;
+#include "qla_edif_bsg.h"
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 9eb708e5e22e..f1f6c740bdcd 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -367,6 +367,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
+#define ql_dbg_edif 0x00000400 /* edif and purex debug */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 2f67ec1df3e6..af0e8be0eb9b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -49,6 +49,28 @@ typedef struct {
uint8_t domain;
} le_id_t;
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+ uint32_t b24 : 24;
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } b;
+} port_id_t;
+#define INVALID_PORT_ID 0xFFFFFF
+
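
Note: port_id_t is moved up here so the headers included below can use it; it overlays a 24-bit integer view (b24) on per-byte domain/area/al_pa fields, which is why the struct member order flips with endianness. A self-contained check of how the two views line up; this sketch hard-codes the little-endian branch and is only valid on a little-endian host.

#include <stdio.h>
#include <stdint.h>

/* Little-endian layout of the driver's port_id_t. */
typedef union {
	uint32_t b24 : 24;
	struct {
		uint8_t al_pa;
		uint8_t area;
		uint8_t domain;
		uint8_t rsvd_1;
	} b;
} port_id_t;

int main(void)
{
	port_id_t id = { .b24 = 0x010203 };

	/* prints: b24=010203 domain=01 area=02 al_pa=03 */
	printf("b24=%06x domain=%02x area=%02x al_pa=%02x\n",
	       id.b24, id.b.domain, id.b.area, id.b.al_pa);
	return 0;
}

With b24 = 0x010203 the low byte lands in al_pa and the high byte in domain, matching the union's little-endian branch.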
#include "qla_bsg.h"
#include "qla_dsd.h"
#include "qla_nx.h"
@@ -319,6 +341,13 @@ struct name_list_extended {
u32 size;
u8 sent;
};
+
+struct els_reject {
+ struct fc_els_ls_rjt *c;
+ dma_addr_t cdma;
+ u16 size;
+};
+
/*
* Timeout timer counts in seconds
*/
@@ -345,6 +374,8 @@ struct name_list_extended {
#define FW_MAX_EXCHANGES_CNT (32 * 1024)
#define REDUCE_EXCHANGES_CNT (8 * 1024)
+#define SET_DID_STATUS(stat_var, status) (stat_var = status << 16)
+
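
Note: SET_DID_STATUS() packs a DID_* host code into bits 16-23 of a SCSI result word, the midlayer's host-byte position; note also that status is not parenthesized, so callers are expected to pass a bare DID_* constant, as every call site in this patch does. A quick demonstration (the DID_* values are copied in for the demo):

#include <stdio.h>

#define DID_OK    0x00
#define DID_ERROR 0x07	/* conventional scsi.h values */

#define SET_DID_STATUS(stat_var, status) (stat_var = status << 16)

int main(void)
{
	int result = 0;

	SET_DID_STATUS(result, DID_ERROR);
	/* prints: result=0x00070000 host_byte=0x07 */
	printf("result=0x%08x host_byte=0x%02x\n",
	       result, (result >> 16) & 0xff);
	return 0;
}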
struct req_que;
struct qla_tgt_sess;
@@ -370,32 +401,10 @@ struct srb_cmd {
#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
#define SRB_WAKEUP_ON_COMP BIT_6
#define SRB_DIF_BUNDL_DMA_VALID BIT_7 /* DIF: DMA list valid */
+#define SRB_EDIF_CLEANUP_DELETE BIT_9
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
-
-/*
- * 24 bit port ID type definition.
- */
-typedef union {
- uint32_t b24 : 24;
-
- struct {
-#ifdef __BIG_ENDIAN
- uint8_t domain;
- uint8_t area;
- uint8_t al_pa;
-#elif defined(__LITTLE_ENDIAN)
- uint8_t al_pa;
- uint8_t area;
- uint8_t domain;
-#else
-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
-#endif
- uint8_t rsvd_1;
- } b;
-} port_id_t;
-#define INVALID_PORT_ID 0xFFFFFF
#define ISP_REG16_DISCONNECT 0xFFFF
static inline le_id_t be_id_to_le(be_id_t id)
@@ -483,6 +492,7 @@ struct srb_iocb {
#define SRB_LOGIN_SKIP_PRLI BIT_2
#define SRB_LOGIN_NVME_PRLI BIT_3
#define SRB_LOGIN_PRLI_ONLY BIT_4
+#define SRB_LOGIN_FCSP BIT_5
uint16_t data[2];
u32 iop[2];
} logio;
@@ -587,6 +597,10 @@ struct srb_iocb {
u16 cmd;
u16 vp_index;
} ctrlvp;
+ struct {
+ struct edif_sa_ctl *sa_ctl;
+ struct qla_sa_update_frame sa_frame;
+ } sa_update;
} u;
struct timer_list timer;
@@ -617,6 +631,21 @@ struct srb_iocb {
#define SRB_PRLI_CMD 21
#define SRB_CTRL_VP 22
#define SRB_PRLO_CMD 23
+#define SRB_SA_UPDATE 25
+#define SRB_ELS_CMD_HST_NOLOGIN 26
+#define SRB_SA_REPLACE 27
+
+struct qla_els_pt_arg {
+ u8 els_opcode;
+ u8 vp_idx;
+ __le16 nport_handle;
+ u16 control_flags;
+ __le32 rx_xchg_address;
+ port_id_t did;
+ u32 tx_len, tx_byte_count, rx_len, rx_byte_count;
+ dma_addr_t tx_addr, rx_addr;
+
+};
enum {
TYPE_SRB,
@@ -630,6 +659,13 @@ struct iocb_resource {
u16 iocb_cnt;
};
+struct bsg_cmd {
+ struct bsg_job *bsg_job;
+ union {
+ struct qla_els_pt_arg els_arg;
+ } u;
+};
+
typedef struct srb {
/*
* Do not move cmd_type field, it needs to
@@ -662,7 +698,21 @@ typedef struct srb {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
struct srb_cmd scmd;
+ struct bsg_cmd bsg_cmd;
} u;
+ struct {
+ bool remapped;
+ struct {
+ dma_addr_t dma;
+ void *buf;
+ uint len;
+ } req;
+ struct {
+ dma_addr_t dma;
+ void *buf;
+ uint len;
+ } rsp;
+ } remap;
/*
* Report completion status @res and call sp_put(@sp). @res is
* an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a
@@ -2294,6 +2344,7 @@ struct imm_ntfy_from_isp {
__le16 nport_handle;
uint16_t reserved_2;
__le16 flags;
+#define NOTIFY24XX_FLAGS_FCSP BIT_5
#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
__le16 srr_rx_id;
@@ -2424,6 +2475,7 @@ enum discovery_state {
DSC_LOGIN_COMPLETE,
DSC_ADISC,
DSC_DELETE_PEND,
+ DSC_LOGIN_AUTH_PEND,
};
enum login_state { /* FW control Target side */
@@ -2563,6 +2615,34 @@ typedef struct fc_port {
u64 tgt_short_link_down_cnt;
u64 tgt_link_down_time;
u64 dev_loss_tmo;
+ /*
+ * EDIF parameters for encryption.
+ */
+ struct {
+ uint32_t enable:1; /* device is edif enabled/req'd */
+ uint32_t app_stop:2;
+ uint32_t app_started:1;
+ uint32_t secured_login:1;
+ uint32_t aes_gmac:1;
+ uint32_t app_sess_online:1;
+ uint32_t tx_sa_set:1;
+ uint32_t rx_sa_set:1;
+ uint32_t tx_sa_pending:1;
+ uint32_t rx_sa_pending:1;
+ uint32_t tx_rekey_cnt;
+ uint32_t rx_rekey_cnt;
+ uint64_t tx_bytes;
+ uint64_t rx_bytes;
+ uint8_t non_secured_login;
+ uint8_t auth_state;
+ uint16_t rekey_cnt;
+ struct list_head edif_indx_list;
+ spinlock_t indx_list_lock;
+
+ struct list_head tx_sa_list;
+ struct list_head rx_sa_list;
+ spinlock_t sa_list_lock;
+ } edif;
} fc_port_t;
enum {
@@ -2604,7 +2684,8 @@ static const char * const port_dstate_str[] = {
"UPD_FCPORT",
"LOGIN_COMPLETE",
"ADISC",
- "DELETE_PEND"
+ "DELETE_PEND",
+ "LOGIN_AUTH_PEND",
};
/*
@@ -2616,6 +2697,8 @@ static const char * const port_dstate_str[] = {
#define FCF_ASYNC_SENT BIT_3
#define FCF_CONF_COMP_SUPPORTED BIT_4
#define FCF_ASYNC_ACTIVE BIT_5
+#define FCF_FCSP_DEVICE BIT_6
+#define FCF_EDIF_DELETE BIT_7
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -3386,6 +3469,7 @@ enum qla_work_type {
QLA_EVT_SP_RETRY,
QLA_EVT_IIDMA,
QLA_EVT_ELS_PLOGI,
+ QLA_EVT_SA_REPLACE,
};
@@ -3444,6 +3528,11 @@ struct qla_work_evt {
u8 fc4_type;
srb_t *sp;
} gpnft;
+ struct {
+ struct edif_sa_ctl *sa_ctl;
+ fc_port_t *fcport;
+ uint16_t nport_handle;
+ } sa_update;
} u;
};
@@ -3845,7 +3934,6 @@ struct qlt_hw_data {
int num_act_qpairs;
#define DEFAULT_NAQP 2
spinlock_t atio_lock ____cacheline_aligned;
- struct btree_head32 host_map;
};
#define MAX_QFULL_CMDS_ALLOC 8192
@@ -3935,6 +4023,7 @@ struct qla_hw_data {
uint32_t scm_supported_f:1;
/* Enabled in Driver */
uint32_t scm_enabled:1;
+ uint32_t edif_enabled:1;
uint32_t max_req_queue_warned:1;
uint32_t plogi_template_valid:1;
uint32_t port_isolated:1;
@@ -4619,8 +4708,23 @@ struct qla_hw_data {
struct qla_hw_data_stat stat;
pci_error_state_t pci_error_state;
u64 prev_cmd_cnt;
+ struct dma_pool *purex_dma_pool;
+ struct btree_head32 host_map;
+
+#define EDIF_NUM_SA_INDEX 512
+#define EDIF_TX_SA_INDEX_BASE EDIF_NUM_SA_INDEX
+ void *edif_rx_sa_id_map;
+ void *edif_tx_sa_id_map;
+ spinlock_t sadb_fp_lock;
+
+ struct list_head sadb_tx_index_list;
+ struct list_head sadb_rx_index_list;
+ spinlock_t sadb_lock; /* protects list */
+ struct els_reject elsrej;
};
+#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
+
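
Note: RX_ELS_SIZE pads each received-ELS node to a cache-line multiple. For reference, the classic roundup(x, y) form is (((x) + (y) - 1) / (y)) * (y); a toy computation with made-up sizes, since struct enode and ELS_MAX_PAYLOAD are not visible in this hunk:

#include <stdio.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	/* e.g. a 40-byte node header plus a 2116-byte payload, 64-byte lines */
	printf("%u\n", roundup(40u + 2116u, 64u));	/* -> 2176 */
	return 0;
}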
struct active_regions {
uint8_t global;
struct {
@@ -4659,6 +4763,8 @@ struct purex_item {
} iocb;
};
+#include "qla_edif.h"
+
#define SCM_FLAG_RDF_REJECT 0x00
#define SCM_FLAG_RDF_COMPLETED 0x01
@@ -4888,6 +4994,8 @@ typedef struct scsi_qla_host {
u64 reset_cmd_err_cnt;
u64 link_down_time;
u64 short_link_down_cnt;
+ struct edif_dbell e_dbell;
+ struct pur_core pur_cinfo;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -5088,6 +5196,43 @@ enum nexus_wait_type {
WAIT_LUN,
};
+#define INVALID_EDIF_SA_INDEX 0xffff
+#define RX_DELETE_NO_EDIF_SA_INDEX 0xfffe
+
+#define QLA_SKIP_HANDLE QLA_TGT_SKIP_HANDLE
+
+/* edif hash element */
+struct edif_list_entry {
+ uint16_t handle; /* nport_handle */
+ uint32_t update_sa_index;
+ uint32_t delete_sa_index;
+ uint32_t count; /* counter for filtering sa_index */
+#define EDIF_ENTRY_FLAGS_CLEANUP 0x01 /* this index is being cleaned up */
+ uint32_t flags; /* used by sadb cleanup code */
+ fc_port_t *fcport; /* needed by rx delay timer function */
+ struct timer_list timer; /* rx delay timer */
+ struct list_head next;
+};
+
+#define EDIF_TX_INDX_BASE 512
+#define EDIF_RX_INDX_BASE 0
+#define EDIF_RX_DELETE_FILTER_COUNT 3 /* delay queuing rx delete until this many */
+
+/* entry in the sa_index free pool */
+
+struct sa_index_pair {
+ uint16_t sa_index;
+ uint32_t spi;
+};
+
+/* edif sa_index data structure */
+struct edif_sa_index_entry {
+ struct sa_index_pair sa_pair[2];
+ fc_port_t *fcport;
+ uint16_t handle;
+ struct list_head next;
+};
+
/* Refer to SNIA SFF 8247 */
struct sff_8247_a0 {
u8 txid; /* transceiver id */
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
new file mode 100644
index 000000000000..2db954a7aaf1
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -0,0 +1,3409 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (c) 2021 Marvell
+ */
+#include "qla_def.h"
+#include "qla_edif.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <scsi/scsi_tcq.h>
+
+static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
+ struct list_head *sa_list);
+static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
+ struct qla_sa_update_frame *sa_frame);
+static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
+ uint16_t sa_index);
+static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
+
+struct edb_node {
+ struct list_head list;
+ uint32_t ntype;
+ union {
+ port_id_t plogi_did;
+ uint32_t async;
+ port_id_t els_sid;
+ struct edif_sa_update_aen sa_aen;
+ } u;
+};
+
+static struct els_sub_cmd {
+ uint16_t cmd;
+ const char *str;
+} sc_str[] = {
+ {SEND_ELS, "send ELS"},
+ {SEND_ELS_REPLY, "send ELS Reply"},
+ {PULL_ELS, "retrieve ELS"},
+};
+
+const char *sc_to_str(uint16_t cmd)
+{
+ int i;
+ struct els_sub_cmd *e;
+
+ for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
+ e = sc_str + i;
+ if (cmd == e->cmd)
+ return e->str;
+ }
+ return "unknown";
+}
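
Note: sc_to_str() is a plain table scan over sc_str[]. The same shape in isolation, with placeholder command values (SEND_ELS and friends are defined elsewhere in this series):

#include <stdio.h>
#include <stddef.h>

struct sub_cmd {
	unsigned short cmd;
	const char *str;
};

static const struct sub_cmd sc_str[] = {
	{ 1, "send ELS" },		/* placeholder values, not the real ones */
	{ 2, "send ELS Reply" },
	{ 3, "retrieve ELS" },
};

static const char *sc_to_str(unsigned short cmd)
{
	for (size_t i = 0; i < sizeof(sc_str) / sizeof(sc_str[0]); i++)
		if (sc_str[i].cmd == cmd)
			return sc_str[i].str;
	return "unknown";
}

int main(void)
{
	printf("%s\n", sc_to_str(3));	/* prints: retrieve ELS */
	return 0;
}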
+
+static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
+ uint16_t handle)
+{
+ struct edif_list_entry *entry;
+ struct edif_list_entry *tentry;
+ struct list_head *indx_list = &fcport->edif.edif_indx_list;
+
+ list_for_each_entry_safe(entry, tentry, indx_list, next) {
+ if (entry->handle == handle)
+ return entry;
+ }
+ return NULL;
+}
+
+/* timeout called when no traffic and delayed rx sa_index delete */
+static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
+{
+ struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
+ fc_port_t *fcport = edif_entry->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct edif_sa_ctl *sa_ctl;
+ uint16_t nport_handle;
+ unsigned long flags = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3069,
+ "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n",
+ __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);
+
+ /*
+ * if delete_sa_index is valid then no one has serviced this
+ * delayed delete
+ */
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+
+ /*
+ * delete_sa_index is invalidated when we find the new sa_index in
+ * the incoming data stream. If it is not invalidated then we are
+ * still looking for the new sa_index because there is no I/O and we
+ * need to just force the rx delete and move on. Otherwise
+ * we could get another rekey which will result in an error 66.
+ */
+ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
+ uint16_t delete_sa_index = edif_entry->delete_sa_index;
+
+ edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+ nport_handle = edif_entry->handle;
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
+ delete_sa_index, 0);
+
+ if (sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
+ __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
+ nport_handle);
+
+ sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
+ set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
+ qla_post_sa_replace_work(fcport->vha, fcport,
+ nport_handle, sa_ctl);
+
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl not found for delete_sa_index: %d\n",
+ __func__, edif_entry->delete_sa_index);
+ }
+ } else {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ }
+}
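
Note: the handler recovers its edif_list_entry from the bare timer_list pointer with from_timer(), which is a container_of() wrapper keyed on the timer field registered via timer_setup(). A minimal sketch of that recovery with a stand-in timer type:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int dummy; };	/* stand-in for the kernel type */

struct entry {
	unsigned short handle;
	struct timer_list timer;
};

/* from_timer(var, t, field) is container_of(t, typeof(*var), field) */
static void timeout(struct timer_list *t)
{
	struct entry *e = container_of(t, struct entry, timer);

	printf("handle=0x%x\n", e->handle);	/* prints: handle=0x1f */
}

int main(void)
{
	struct entry e = { .handle = 0x1f };

	timeout(&e.timer);
	return 0;
}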
+
+/*
+ * create a new list entry for this nport handle and
+ * add an sa_update index to the list - called for sa_update
+ */
+static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
+ uint16_t sa_index, uint16_t handle)
+{
+ struct edif_list_entry *entry;
+ unsigned long flags = 0;
+
+ /* if the entry exists, then just update the sa_index */
+ entry = qla_edif_list_find_sa_index(fcport, handle);
+ if (entry) {
+ entry->update_sa_index = sa_index;
+ entry->count = 0;
+ return 0;
+ }
+
+ /*
+ * This is the normal path - there should be no existing entry
+ * when update is called. The exception is at startup
+ * when update is called for the first two sa_indexes
+ * followed by a delete of the first sa_index
+ */
+ entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&entry->next);
+ entry->handle = handle;
+ entry->update_sa_index = sa_index;
+ entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+ entry->count = 0;
+ entry->flags = 0;
+ timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return 0;
+}
+
+/* remove an entry from the list */
+static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+}
+
+int qla_post_sa_replace_work(struct scsi_qla_host *vha,
+ fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.sa_update.fcport = fcport;
+ e->u.sa_update.sa_ctl = sa_ctl;
+ e->u.sa_update.nport_handle = nport_handle;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
+ return qla2x00_post_work(vha, e);
+}
+
+static void
+qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport)
+{
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
+ fcport->node_name, fcport->port_name, fcport->d_id.b24);
+
+ fcport->edif.tx_rekey_cnt = 0;
+ fcport->edif.rx_rekey_cnt = 0;
+
+ fcport->edif.tx_bytes = 0;
+ fcport->edif.rx_bytes = 0;
+}
+
+static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
+fc_port_t *fcport)
+{
+ struct extra_auth_els *p;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct qla_bsg_auth_els_request *req =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ if (!vha->hw->flags.edif_enabled) {
+ ql_dbg(ql_dbg_edif, vha, 0x9105,
+ "%s edif not enabled\n", __func__);
+ goto done;
+ }
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ goto done;
+ }
+
+ p = &req->e;
+
+ /* Get response */
+ if (p->sub_cmd == PULL_ELS) {
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ qla_pur_get_pending(vha, fcport, bsg_job);
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
+ __func__, sc_to_str(p->sub_cmd), fcport->port_name,
+ fcport->d_id.b24, rpl->rx_xchg_address,
+ rpl->r.reply_payload_rcv_len, bsg_job);
+
+ goto done;
+ }
+ return 0;
+
+done:
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return -EIO;
+}
+
+fc_port_t *
+qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
+{
+ fc_port_t *f, *tf;
+
+ f = NULL;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+ if ((f->flags & FCF_FCSP_DEVICE)) {
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
+ "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
+ f->node_name, f->port_name,
+ f->d_id.b24, id->b24);
+ if (f->d_id.b24 == id->b24)
+ return f;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * qla_edif_app_check(): check for valid application id.
+ * @vha: host adapter pointer
+ * @appid: application id
+ * Return: false = fail, true = pass
+ */
+static bool
+qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
+{
+ /* check that the app is allowed/known to the driver */
+
+ if (appid.app_vid == EDIF_APP_ID) {
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
+ return true;
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
+ __func__, appid.app_vid);
+
+ return false;
+}
+
+static void qla_edif_reset_auth_wait(struct fc_port *fcport, int state,
+ int waitonly)
+{
+ int cnt, max_cnt = 200;
+ bool traced = false;
+
+ fcport->keep_nport_handle = 1;
+
+ if (!waitonly) {
+ qla2x00_set_fcport_disc_state(fcport, state);
+ qlt_schedule_sess_for_deletion(fcport);
+ } else {
+ qla2x00_set_fcport_disc_state(fcport, state);
+ }
+
+ ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+ "%s: waiting for session, max_cnt=%u\n",
+ __func__, max_cnt);
+
+ cnt = 0;
+
+ if (waitonly) {
+ /* Marker wait min 10 msecs. */
+ msleep(50);
+ cnt += 50;
+ }
+ while (1) {
+ if (!traced) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+ "%s: session sleep.\n",
+ __func__);
+ traced = true;
+ }
+ msleep(20);
+ cnt++;
+ if (waitonly && (fcport->disc_state == state ||
+ fcport->disc_state == DSC_LOGIN_COMPLETE))
+ break;
+ if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
+ break;
+ if (cnt > max_cnt)
+ break;
+ }
+
+ if (!waitonly) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+ "%s: waited for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
+ __func__, fcport->port_name, fcport->loop_id,
+ fcport->d_id.b24, fcport, fcport->disc_state, cnt);
+ } else {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+ "%s: waited ONLY for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
+ __func__, fcport->port_name, fcport->loop_id,
+ fcport->d_id.b24, fcport, fcport->disc_state, cnt);
+ }
+}
+
+static void
+qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
+ int index)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+ list_del(&sa_ctl->next);
+ spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+ if (index >= 512)
+ fcport->edif.tx_rekey_cnt--;
+ else
+ fcport->edif.rx_rekey_cnt--;
+ kfree(sa_ctl);
+}
+
+/* return an index to the freepool */
+static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
+ uint16_t sa_index)
+{
+ void *sa_id_map;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+ u16 lsa_index = sa_index;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: entry\n", __func__);
+
+ if (dir) {
+ sa_id_map = ha->edif_tx_sa_id_map;
+ lsa_index -= EDIF_TX_SA_INDEX_BASE;
+ } else {
+ sa_id_map = ha->edif_rx_sa_id_map;
+ }
+
+ spin_lock_irqsave(&ha->sadb_fp_lock, flags);
+ clear_bit(lsa_index, sa_id_map);
+ spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: index %d added to free pool\n", __func__, sa_index);
+}
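
Note: SA indexes come from two bitmaps (edif_rx_sa_id_map / edif_tx_sa_id_map), with TX indexes biased by EDIF_TX_SA_INDEX_BASE (512), which is why the function above subtracts the base before clearing the bit. A userspace model of the allocate/free arithmetic under those assumptions; the driver itself uses the kernel's set_bit()/clear_bit() under sadb_fp_lock:

#include <stdio.h>

#define EDIF_NUM_SA_INDEX	512
#define EDIF_TX_SA_INDEX_BASE	EDIF_NUM_SA_INDEX

static unsigned char rx_map[EDIF_NUM_SA_INDEX / 8];
static unsigned char tx_map[EDIF_NUM_SA_INDEX / 8];

static int alloc_index(unsigned char *map, int base)
{
	for (int i = 0; i < EDIF_NUM_SA_INDEX; i++) {
		if (!(map[i / 8] & (1 << (i % 8)))) {
			map[i / 8] |= 1 << (i % 8);
			return base + i;	/* tx values are 512..1023 */
		}
	}
	return -1;				/* pool exhausted */
}

static void free_index(unsigned char *map, int base, int sa_index)
{
	int i = sa_index - base;		/* undo the tx bias */

	map[i / 8] &= ~(1 << (i % 8));
}

int main(void)
{
	int tx = alloc_index(tx_map, EDIF_TX_SA_INDEX_BASE);
	int rx = alloc_index(rx_map, 0);

	printf("tx=%d rx=%d\n", tx, rx);	/* prints: tx=512 rx=0 */
	free_index(tx_map, EDIF_TX_SA_INDEX_BASE, tx);
	free_index(rx_map, 0, rx);
	return 0;
}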
+
+static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
+ struct fc_port *fcport, struct edif_sa_index_entry *entry,
+ int pdir)
+{
+ struct edif_list_entry *edif_entry;
+ struct edif_sa_ctl *sa_ctl;
+ int i, dir;
+ int key_cnt = 0;
+
+ for (i = 0; i < 2; i++) {
+ if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
+ continue;
+
+ if (fcport->loop_id != entry->handle) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
+ __func__, i, entry->handle, fcport->loop_id,
+ entry->sa_pair[i].sa_index);
+ }
+
+ /* release the sa_ctl */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
+ entry->sa_pair[i].sa_index, pdir);
+ if (sa_ctl &&
+ qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
+ }
+
+ /* Release the index */
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, entry->sa_pair[i].sa_index, entry->handle);
+
+ dir = (entry->sa_pair[i].sa_index <
+ EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
+ qla_edif_add_sa_index_to_freepool(fcport, dir,
+ entry->sa_pair[i].sa_index);
+
+ /* Delete timer on RX */
+ if (pdir != SAU_FLG_TX) {
+ edif_entry =
+ qla_edif_list_find_sa_index(fcport, entry->handle);
+ if (edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry, edif_entry->update_sa_index,
+ edif_entry->delete_sa_index);
+ qla_edif_list_delete_sa_index(fcport, edif_entry);
+ /*
+ * valid delete_sa_index indicates there is a rx
+ * delayed delete queued
+ */
+ if (edif_entry->delete_sa_index !=
+ INVALID_EDIF_SA_INDEX) {
+ del_timer(&edif_entry->timer);
+
+ /* build and send the aen */
+ fcport->edif.rx_sa_set = 1;
+ fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(vha,
+ VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, fcport);
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry, edif_entry->update_sa_index,
+ edif_entry->delete_sa_index);
+
+ kfree(edif_entry);
+ }
+ }
+ key_cnt++;
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: %d %s keys released\n",
+ __func__, key_cnt, pdir ? "tx" : "rx");
+}
+
+/* find and release all outstanding sadb sa_indices */
+void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+ struct edif_sa_index_entry *entry, *tmp;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: Starting...\n", __func__);
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
+ if (entry->fcport == fcport) {
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ __qla2x00_release_all_sadb(vha, fcport, entry, 0);
+ kfree(entry);
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ break;
+ }
+ }
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
+ if (entry->fcport == fcport) {
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);
+
+ kfree(entry);
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+}
+
+/**
+ * qla_edif_app_start - application has announced its presence
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ *
+ * Set/activate doorbell. Reset current sessions and re-login with
+ * secure flag.
+ */
+static int
+qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct app_start appstart;
+ struct app_start_reply appreply;
+ struct fc_port *fcport, *tf;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start\n", __func__);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appstart,
+ sizeof(struct app_start));
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
+ __func__, appstart.app_info.app_vid, appstart.app_start_flags);
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ /* mark doorbell as active since an app is now present */
+ vha->e_dbell.db_flags = EDB_ACTIVE;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
+ __func__);
+ }
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ ql_dbg(ql_dbg_edif, vha, 0xf084,
+ "%s: sess %p %8phC lid %#04x s_id %06x logout %d\n",
+ __func__, fcport, fcport->port_name,
+ fcport->loop_id, fcport->d_id.b24,
+ fcport->logout_on_delete);
+
+ ql_dbg(ql_dbg_edif, vha, 0xf084,
+ "keep %d els_logo %d disc state %d auth state %d stop state %d\n",
+ fcport->keep_nport_handle,
+ fcport->send_els_logo, fcport->disc_state,
+ fcport->edif.auth_state, fcport->edif.app_stop);
+
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ break;
+
+ fcport->edif.app_started = 1;
+ fcport->edif.app_stop = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+ __func__, fcport->port_name);
+ fcport->edif.app_sess_online = 1;
+ qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
+ qla_edif_sa_ctl_init(vha, fcport);
+ }
+
+ if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+ /* mark as active since an app is now present */
+ vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
+ __func__);
+ }
+
+ appreply.host_support_edif = vha->hw->flags.edif_enabled;
+ appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
+ appreply.edif_edb_active = vha->e_dbell.db_flags;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(struct app_start_reply);
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &appreply,
+ sizeof(struct app_start_reply));
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s app start completed with 0x%x\n",
+ __func__, rval);
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_stop - app has announced it's exiting.
+ * @vha: host adapter pointer
+ * @bsg_job: user space command pointer
+ *
+ * Free any in-flight messages and clear all doorbell events
+ * to the application. Reject any messages related to security.
+ */
+static int
+qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct app_stop appstop;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct fc_port *fcport, *tf;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appstop,
+ sizeof(struct app_stop));
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
+ __func__, appstop.app_info.app_vid);
+
+ /* Call db stop and enode stop functions */
+
+ /* if we leave this running, short waits remain operational (< 16 secs) */
+ qla_enode_stop(vha); /* stop enode */
+ qla_edb_stop(vha); /* stop db */
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (fcport->edif.non_secured_login)
+ continue;
+
+ if (fcport->flags & FCF_FCSP_DEVICE) {
+ ql_dbg(ql_dbg_edif, vha, 0xf084,
+ "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
+ __func__, fcport,
+ fcport->port_name, fcport->loop_id, fcport->d_id.b24,
+ fcport->logout_on_delete, fcport->keep_nport_handle,
+ fcport->send_els_logo);
+
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ break;
+
+ fcport->edif.app_stop = 1;
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+ __func__, fcport->port_name);
+
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+
+ /* qla_edif_flush_sa_ctl_lists(fcport); */
+ fcport->edif.app_started = 0;
+ }
+ }
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ /* no return interface to app - it assumes we cleaned up ok */
+
+ return rval;
+}
+
+static int
+qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct app_plogi_reply *appplogireply)
+{
+ int ret = 0;
+
+ if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
+ __func__, fcport->port_name, fcport->edif.tx_sa_set,
+ fcport->edif.rx_sa_set);
+ appplogireply->prli_status = 0;
+ ret = 1;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+ fcport->port_name);
+ fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+ appplogireply->prli_status = 1;
+ }
+ return ret;
+}
+
+/**
+ * qla_edif_app_authok - authentication by app succeeded. Driver can proceed
+ * with prli
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int
+qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct auth_complete_cmd appplogiok;
+ struct app_plogi_reply appplogireply = {0};
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ port_id_t portid = {0};
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appplogiok,
+ sizeof(struct auth_complete_cmd));
+
+ switch (appplogiok.type) {
+ case PL_TYPE_WWPN:
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ appplogiok.u.wwpn, 0);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s wwpn lookup failed: %8phC\n",
+ __func__, appplogiok.u.wwpn);
+ break;
+ case PL_TYPE_DID:
+ fcport = qla2x00_find_fcport_by_pid(vha, &appplogiok.u.d_id);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s d_id lookup failed: %x\n", __func__,
+ portid.b24);
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s undefined type: %x\n", __func__,
+ appplogiok.type);
+ break;
+ }
+
+ if (!fcport) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto errstate_exit;
+ }
+
+ /*
+ * If the port is online then this is a REKEY operation;
+ * only do sa update checking.
+ */
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s Skipping PRLI complete based on rekey\n", __func__);
+ appplogireply.prli_status = 1;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
+ goto errstate_exit;
+ }
+
+ /* make sure in AUTH_PENDING or else reject */
+ if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC is not in auth pending state (%x)\n",
+ __func__, fcport->port_name, fcport->disc_state);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 0;
+ goto errstate_exit;
+ }
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 1;
+ if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
+ __func__, fcport->port_name, fcport->edif.tx_sa_set,
+ fcport->edif.rx_sa_set);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 0;
+ goto errstate_exit;
+
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+ fcport->port_name);
+ fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+ }
+
+ if (qla_ini_mode_enabled(vha)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
+ __func__, fcport->port_name);
+ qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 1);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+
+errstate_exit:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &appplogireply,
+ sizeof(struct app_plogi_reply));
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_authfail - authentication by app has failed. Driver is given
+ * notice to tear down current session.
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int
+qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct auth_complete_cmd appplogifail;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ port_id_t portid = {0};
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appplogifail,
+ sizeof(struct auth_complete_cmd));
+
+ /*
+ * TODO: edif: app has failed this plogi. Inform driver to
+ * take any action (if any).
+ */
+ switch (appplogifail.type) {
+ case PL_TYPE_WWPN:
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ appplogifail.u.wwpn, 0);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ break;
+ case PL_TYPE_DID:
+ fcport = qla2x00_find_fcport_by_pid(vha, &appplogifail.u.d_id);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s d_id lookup failed: %x\n", __func__,
+ portid.b24);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s undefined type: %x\n", __func__,
+ appplogifail.type);
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ break;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s fcport is 0x%p\n", __func__, fcport);
+
+ if (fcport) {
+ /* set/reset edif values and flags */
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
+ __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
+
+ if (qla_ini_mode_enabled(fcport->vha)) {
+ fcport->send_els_logo = 1;
+ qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
+ }
+ }
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
+ * [initiator|target] mode). It can request a specific session by nport id or
+ * all sessions.
+ * @vha: host adapter pointer
+ * @bsg_job: user request pointer
+ */
+static int
+qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ int32_t num_cnt;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct app_pinfo_req app_req;
+ struct app_pinfo_reply *app_reply;
+ port_id_t tdid;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &app_req,
+ sizeof(struct app_pinfo_req));
+
+ num_cnt = app_req.num_ports; /* num of ports alloc'd by app */
+
+ app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
+ sizeof(struct app_pinfo) * num_cnt), GFP_KERNEL);
+ if (!app_reply) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ } else {
+ struct fc_port *fcport = NULL, *tf;
+ uint32_t pcnt = 0;
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!(fcport->flags & FCF_FCSP_DEVICE))
+ continue;
+
+ tdid = app_req.remote_pid;
+
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "APP request entry - portid=%06x.\n", tdid.b24);
+
+ /* Ran out of space */
+ if (pcnt > app_req.num_ports)
+ break;
+
+ if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
+ continue;
+
+ app_reply->ports[pcnt].rekey_count =
+ fcport->edif.rekey_cnt;
+
+ app_reply->ports[pcnt].remote_type =
+ VND_CMD_RTYPE_UNKNOWN;
+ if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
+ app_reply->ports[pcnt].remote_type |=
+ VND_CMD_RTYPE_TARGET;
+ if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
+ app_reply->ports[pcnt].remote_type |=
+ VND_CMD_RTYPE_INITIATOR;
+
+ app_reply->ports[pcnt].remote_pid = fcport->d_id;
+
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x\n",
+ fcport->node_name, fcport->port_name, pcnt, fcport->d_id.b24);
+
+ switch (fcport->edif.auth_state) {
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
+ fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
+ app_reply->ports[pcnt].auth_state =
+ VND_CMD_AUTH_STATE_NEEDED;
+ } else {
+ app_reply->ports[pcnt].auth_state =
+ VND_CMD_AUTH_STATE_ELS_RCVD;
+ }
+ break;
+ default:
+ app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
+ break;
+ }
+
+ memcpy(app_reply->ports[pcnt].remote_wwpn,
+ fcport->port_name, 8);
+
+ app_reply->ports[pcnt].remote_state =
+ (atomic_read(&fcport->state) ==
+ FCS_ONLINE ? 1 : 0);
+
+ pcnt++;
+
+ if (tdid.b24 != 0)
+ break;
+ }
+ app_reply->port_count = pcnt;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ }
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, app_reply,
+ sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * num_cnt);
+
+ kfree(app_reply);
+
+ return rval;
+}
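
Note: the reply above is sized as a fixed header plus num_cnt trailing app_pinfo slots, the flexible-array allocation idiom. A minimal model with simplified field names:

#include <stdio.h>
#include <stdlib.h>

struct pinfo {
	unsigned int rekey_count;	/* simplified stand-in fields */
};

struct pinfo_reply {
	unsigned int port_count;
	struct pinfo ports[];		/* flexible array member */
};

int main(void)
{
	unsigned int num_cnt = 4;	/* ports alloc'd by the app */
	struct pinfo_reply *r =
		calloc(1, sizeof(*r) + num_cnt * sizeof(struct pinfo));

	if (!r)
		return 1;
	r->port_count = num_cnt;
	printf("alloc=%zu bytes\n",
	       sizeof(*r) + num_cnt * sizeof(struct pinfo));
	free(r);
	return 0;
}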
+
+/**
+ * qla_edif_app_getstats - app would like to read various statistics info
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int32_t
+qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t ret_size, size;
+
+ struct app_sinfo_req app_req;
+ struct app_stats_reply *app_reply;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &app_req,
+ sizeof(struct app_sinfo_req));
+ if (app_req.num_ports == 0) {
+ ql_dbg(ql_dbg_async, vha, 0x911d,
+ "%s app did not indicate number of ports to return\n",
+ __func__);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ }
+
+ size = sizeof(struct app_stats_reply) +
+ (sizeof(struct app_sinfo) * app_req.num_ports);
+
+ if (size > bsg_job->reply_payload.payload_len)
+ ret_size = bsg_job->reply_payload.payload_len;
+ else
+ ret_size = size;
+
+ app_reply = kzalloc(size, GFP_KERNEL);
+ if (!app_reply) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ } else {
+ struct fc_port *fcport = NULL, *tf;
+ uint32_t pcnt = 0;
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (fcport->edif.enable) {
+ if (pcnt > app_req.num_ports)
+ break;
+
+ app_reply->elem[pcnt].rekey_count =
+ fcport->edif.rekey_cnt;
+ app_reply->elem[pcnt].tx_bytes =
+ fcport->edif.tx_bytes;
+ app_reply->elem[pcnt].rx_bytes =
+ fcport->edif.rx_bytes;
+
+ memcpy(app_reply->elem[pcnt].remote_wwpn,
+ fcport->port_name, 8);
+
+ pcnt++;
+ }
+ }
+ app_reply->elem_count = pcnt;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ }
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, app_reply, ret_size);
+
+ kfree(app_reply);
+
+ return rval;
+}
+
+int32_t
+qla_edif_app_mgmt(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct app_id appcheck;
+ bool done = true;
+ int32_t rval = 0;
+ uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
+ __func__, vnd_sc);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appcheck,
+ sizeof(struct app_id));
+
+ if (!vha->hw->flags.edif_enabled ||
+ test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
+ __func__, bsg_job, vha->dpc_flags);
+
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ if (!qla_edif_app_check(vha, appcheck)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s app checked failed.\n",
+ __func__);
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ switch (vnd_sc) {
+ case QL_VND_SC_SA_UPDATE:
+ done = false;
+ rval = qla24xx_sadb_update(bsg_job);
+ break;
+ case QL_VND_SC_APP_START:
+ rval = qla_edif_app_start(vha, bsg_job);
+ break;
+ case QL_VND_SC_APP_STOP:
+ rval = qla_edif_app_stop(vha, bsg_job);
+ break;
+ case QL_VND_SC_AUTH_OK:
+ rval = qla_edif_app_authok(vha, bsg_job);
+ break;
+ case QL_VND_SC_AUTH_FAIL:
+ rval = qla_edif_app_authfail(vha, bsg_job);
+ break;
+ case QL_VND_SC_GET_FCINFO:
+ rval = qla_edif_app_getfcinfo(vha, bsg_job);
+ break;
+ case QL_VND_SC_GET_STATS:
+ rval = qla_edif_app_getstats(vha, bsg_job);
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
+ __func__,
+ bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
+ rval = EXT_STATUS_INVALID_PARAM;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ break;
+ }
+
+done:
+ if (done) {
+ ql_dbg(ql_dbg_user, vha, 0x7009,
+ "%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
+
+ return rval;
+}
+
+static struct edif_sa_ctl *
+qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
+ int dir)
+{
+ struct edif_sa_ctl *sa_ctl;
+ struct qla_sa_update_frame *sap;
+ int index = sa_frame->fast_sa_index;
+ unsigned long flags = 0;
+
+ sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
+ if (!sa_ctl) {
+ /* couldn't get space */
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "unable to allocate SA CTL\n");
+ return NULL;
+ }
+
+ /*
+ * need to allocate sa_index here and save it
+ * in both sa_ctl->index and sa_frame->fast_sa_index;
+ * If alloc fails then delete sa_ctl and return NULL
+ */
+ INIT_LIST_HEAD(&sa_ctl->next);
+ sap = &sa_ctl->sa_frame;
+ *sap = *sa_frame;
+ sa_ctl->index = index;
+ sa_ctl->fcport = fcport;
+ sa_ctl->flags = 0;
+ sa_ctl->state = 0L;
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
+ __func__, sa_ctl, sa_ctl->index, sa_ctl->state);
+ spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+ if (dir == SAU_FLG_TX)
+ list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
+ else
+ list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
+ spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+
+ return sa_ctl;
+}
+
+void
+qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
+{
+ struct edif_sa_ctl *sa_ctl, *tsa_ctl;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+
+ list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
+ next) {
+ list_del(&sa_ctl->next);
+ kfree(sa_ctl);
+ }
+
+ list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
+ next) {
+ list_del(&sa_ctl->next);
+ kfree(sa_ctl);
+ }
+
+ spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+}
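
Note: both loops use list_for_each_entry_safe() because every element is freed mid-walk; the _safe variant caches the successor before the current node can be removed. The same idea on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->val = i;
		n->next = head;
		head = n;
	}

	/* cache the successor first, then it is safe to free the node */
	for (struct node *n = head, *tmp; n; n = tmp) {
		tmp = n->next;
		printf("freeing %d\n", n->val);
		free(n);
	}
	return 0;
}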
+
+struct edif_sa_ctl *
+qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
+{
+ struct edif_sa_ctl *sa_ctl, *tsa_ctl;
+ struct list_head *sa_list;
+
+ if (dir == SAU_FLG_TX)
+ sa_list = &fcport->edif.tx_sa_list;
+ else
+ sa_list = &fcport->edif.rx_sa_list;
+
+ list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
+ if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
+ sa_ctl->index == index)
+ return sa_ctl;
+ }
+ return NULL;
+}
+
+/* add the sa to the correct list */
+static int
+qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
+ struct qla_sa_update_frame *sa_frame)
+{
+ struct edif_sa_ctl *sa_ctl = NULL;
+ int dir;
+ uint16_t sa_index;
+
+ dir = (sa_frame->flags & SAU_FLG_TX);
+
+ /* map the spi to an sa_index */
+ sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
+ if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
+ /* process rx delete */
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+ "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
+ __func__, fcport->loop_id, sa_frame->spi);
+
+ /* build and send the aen */
+ fcport->edif.rx_sa_set = 1;
+ fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(fcport->vha,
+ VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, fcport);
+
+ /* force a return of good bsg status */
+ return RX_DELETE_NO_EDIF_SA_INDEX;
+ } else if (sa_index == INVALID_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
+ __func__, sa_frame->spi, dir);
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
+ __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);
+
+ /* This is a local copy of sa_frame. */
+ sa_frame->fast_sa_index = sa_index;
+ /* create the sa_ctl */
+ sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
+ if (!sa_ctl) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
+ __func__, sa_frame->spi, dir, sa_index);
+ return -1;
+ }
+
+ set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);
+
+ if (dir == SAU_FLG_TX)
+ fcport->edif.tx_rekey_cnt++;
+ else
+ fcport->edif.rx_rekey_cnt++;
+
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
+ __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
+ fcport->edif.tx_rekey_cnt,
+ fcport->edif.rx_rekey_cnt, fcport->loop_id);
+
+ return 0;
+}
+
+#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
+#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
+
+int
+qla24xx_sadb_update(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = NULL;
+ srb_t *sp = NULL;
+ struct edif_list_entry *edif_entry = NULL;
+ int found = 0;
+ int rval = 0;
+ int result = 0;
+ struct qla_sa_update_frame sa_frame;
+ struct srb_iocb *iocb_cmd;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
+ "%s entered, vha: 0x%p\n", __func__, vha);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &sa_frame,
+ sizeof(struct qla_sa_update_frame));
+
+ /* Check if host is online */
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
+ rval = -EIO;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
+ rval = -EIO;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &sa_frame.port_id);
+ if (fcport) {
+ found = 1;
+ if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
+ fcport->edif.tx_bytes = 0;
+ if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
+ fcport->edif.rx_bytes = 0;
+ }
+
+ if (!found) {
+ ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
+ sa_frame.port_id.b24);
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
+ goto done;
+ }
+
+ /* make sure the nport_handle is valid */
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
+ __func__, fcport->port_name, sa_frame.spi,
+ fcport->disc_state);
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
+ goto done;
+ }
+
+ /* allocate and queue an sa_ctl */
+ result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);
+
+ /* failure of bsg */
+ if (result == INVALID_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN, skipping update.\n",
+ __func__, fcport->port_name);
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+
+ /* rx delete failure */
+ } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN, skipping rx delete.\n",
+ __func__, fcport->port_name);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ goto done;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
+ __func__, fcport->port_name, sa_frame.fast_sa_index,
+ sa_frame.flags);
+
+ /* looking for rx index and delete */
+ if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
+ (sa_frame.flags & SAU_FLG_INV)) {
+ uint16_t nport_handle = fcport->loop_id;
+ uint16_t sa_index = sa_frame.fast_sa_index;
+
+ /*
+ * make sure we have an existing rx key, otherwise just process
+ * this as a straight delete, just like TX.
+ * This is NOT a normal case; it indicates error recovery or key cleanup
+ * by the ipsec code above us.
+ */
+ edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
+ if (!edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
+ __func__, fcport->loop_id, sa_index);
+ goto force_rx_delete;
+ }
+
+ /*
+ * if we have a forced delete for rx, remove the sa_index from the edif list
+ * and proceed with normal delete. The rx delay timer should not be running
+ */
+ if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
+ qla_edif_list_delete_sa_index(fcport, edif_entry);
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
+ __func__, fcport->loop_id, sa_index);
+ kfree(edif_entry);
+ goto force_rx_delete;
+ }
+
+ /*
+ * delayed rx delete
+ *
+ * if delete_sa_index is not invalid then there is already
+ * a delayed index in progress, return bsg bad status
+ */
+ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
+ struct edif_sa_ctl *sa_ctl;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
+ __func__, edif_entry->handle, edif_entry->delete_sa_index);
+
+ /* free up the sa_ctl that was allocated with the sa_index */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
+ (sa_frame.flags & SAU_FLG_TX));
+ if (sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_ctl for index %d\n",
+ __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
+ }
+
+ /* release the sa_index */
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, sa_index, nport_handle);
+ qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);
+
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ fcport->edif.rekey_cnt++;
+
+ /* configure and start the rx delay timer */
+ edif_entry->fcport = fcport;
+ edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
+ __func__, edif_entry, sa_index, nport_handle);
+
+ /*
+ * Start the timer when we queue the delayed rx delete.
+ * This is an activity timer that goes off if we have not
+ * received packets with the new sa_index
+ */
+ add_timer(&edif_entry->timer);
+
+ /*
+ * sa_delete for rx key with an active rx key including this one
+ * add the delete rx sa index to the hash so we can look for it
+ * in the rsp queue. Do this after making any changes to the
+ * edif_entry as part of the rx delete.
+ */
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
+ __func__, sa_index, nport_handle, bsg_job);
+
+ edif_entry->delete_sa_index = sa_index;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+
+ goto done;
+
+ /*
+ * rx index and update
+ * add the index to the list and continue with normal update
+ */
+ } else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
+ ((sa_frame.flags & SAU_FLG_INV) == 0)) {
+ /* sa_update for rx key */
+ uint32_t nport_handle = fcport->loop_id;
+ uint16_t sa_index = sa_frame.fast_sa_index;
+ int result;
+
+ /*
+ * add the update rx sa index to the hash so we can look for it
+ * in the rsp queue and continue normally
+ */
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: adding update sa_index %d, lid 0x%x to edif_list\n",
+ __func__, sa_index, nport_handle);
+
+ result = qla_edif_list_add_sa_update_index(fcport, sa_index,
+ nport_handle);
+ if (result) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
+ __func__, sa_index, nport_handle);
+ }
+ }
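+ /* note the GMAC mode so qla24xx_sa_update_iocb() can set SA_CNTL_AES_GMAC */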
+ if (sa_frame.flags & SAU_FLG_GMAC_MODE)
+ fcport->edif.aes_gmac = 1;
+ else
+ fcport->edif.aes_gmac = 0;
+
+force_rx_delete:
+ /*
+ * sa_update for both rx and tx keys, sa_delete for tx key
+ * immediately process the request
+ */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done;
+ }
+
+ sp->type = SRB_SA_UPDATE;
+ sp->name = "bsg_sa_update";
+ sp->u.bsg_job = bsg_job;
+ /* sp->free = qla2x00_bsg_sp_free; */
+ sp->free = qla2x00_rel_sp;
+ sp->done = qla2x00_bsg_job_done;
+ iocb_cmd = &sp->u.iocb_cmd;
+ iocb_cmd->u.sa_update.sa_frame = sa_frame;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_dbg_edif, vha, 0x70e3,
+ "qla2x00_start_sp failed=%d.\n", rval);
+
+ qla2x00_rel_sp(sp);
+ rval = -EIO;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: %s sent, hdl=%x, portid=%06x.\n",
+ __func__, sp->name, sp->handle, fcport->d_id.b24);
+
+ fcport->edif.rekey_cnt++;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ return 0;
+
+/*
+ * send back error status
+ */
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
+ __func__, bsg_reply->result, bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return 0;
+}
+
+static void
+qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
+{
+ node->ntype = N_UNDEF;
+ kfree(node);
+}
+
+/**
+ * qla_enode_init - initialize enode structs & lock
+ * @vha: host adapter pointer
+ *
+ * should only be called while the driver is attaching
+ */
+void
+qla_enode_init(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ char name[32];
+
+ if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
+ /* list still active - error */
+ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
+ __func__);
+ return;
+ }
+
+ /* initialize lock which protects pur_core & init list */
+ spin_lock_init(&vha->pur_cinfo.pur_lock);
+ INIT_LIST_HEAD(&vha->pur_cinfo.head);
+
+ snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
+ ha->pdev->device);
+}
+
+/**
+ * qla_enode_stop - stop and clear the enode data
+ * @vha: host adapter pointer
+ *
+ * called when the app has notified the driver that it is exiting
+ */
+void
+qla_enode_stop(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct enode *node, *q;
+
+ if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s enode not active\n", __func__);
+ return;
+ }
+
+ /* grab lock so list doesn't move */
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+
+ vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */
+
+ /* hopefully this is a null list at this point */
+ list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
+ ql_dbg(ql_dbg_edif, vha, 0x910f,
+ "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
+ node->dinfo.nodecnt);
+ list_del_init(&node->list);
+ qla_enode_free(vha, node);
+ }
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+}
+
+/*
+ * allocate enode struct and populate buffer
+ * returns: enode pointer with buffers
+ * NULL on error
+ */
+static struct enode *
+qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
+{
+ struct enode *node;
+ struct purexevent *purex;
+
+ node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
+ if (!node)
+ return NULL;
+
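+ /* the ELS payload buffer lives in the same allocation, right after the enode */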
+ purex = &node->u.purexinfo;
+ purex->msgp = (u8 *)(node + 1);
+ purex->msgp_len = ELS_MAX_PAYLOAD;
+
+ node->ntype = ntype;
+ INIT_LIST_HEAD(&node->list);
+ return node;
+}
+
+static void
+qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
+{
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
+ "%s add enode for type=%x, cnt=%x\n",
+ __func__, ptr->ntype, ptr->dinfo.nodecnt);
+
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+ list_add_tail(&ptr->list, &vha->pur_cinfo.head);
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+ return;
+}
+
+static struct enode *
+qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
+{
+ struct enode *node_rtn = NULL;
+ struct enode *list_node = NULL;
+ unsigned long flags;
+ struct list_head *pos, *q;
+ uint32_t sid;
+ uint32_t rw_flag;
+ struct purexevent *purex;
+
+ /* secure the list from moving under us */
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+
+ list_for_each_safe(pos, q, &vha->pur_cinfo.head) {
+ list_node = list_entry(pos, struct enode, list);
+
+ /* node type determines what p1 and p2 are */
+ purex = &list_node->u.purexinfo;
+ sid = p1;
+ rw_flag = p2;
+
+ if (purex->pur_info.pur_sid.b24 == sid) {
+ if (purex->pur_info.pur_pend == 1 &&
+ rw_flag == PUR_GET) {
+ /*
+ * if the receive is in progress
+ * and it's a read/get, we can't
+ * transfer it yet
+ */
+ ql_dbg(ql_dbg_edif, vha, 0x9106,
+ "%s purex xfer in progress for sid=%x\n",
+ __func__, sid);
+ } else {
+ /* found it and its complete */
+ node_rtn = list_node;
+ list_del(pos);
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+ return node_rtn;
+}
+
+/**
+ * qla_pur_get_pending - read/return authentication message sent
+ * from remote port
+ * @vha: host adapter pointer
+ * @fcport: session pointer
+ * @bsg_job: user request where the message is copied to.
+ */
+static int
+qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct bsg_job *bsg_job)
+{
+ struct enode *ptr;
+ struct purexevent *purex;
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ bsg_job->reply_len = sizeof(*rpl);
+
+ ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
+ if (!ptr) {
+ ql_dbg(ql_dbg_edif, vha, 0x9111,
+ "%s no enode data found for %8phN sid=%06x\n",
+ __func__, fcport->port_name, fcport->d_id.b24);
+ SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
+ return -EIO;
+ }
+
+ /*
+ * enode is now off the linked list and is ours to deal with
+ */
+ purex = &ptr->u.purexinfo;
+
+ /* Copy info back to caller */
+ rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
+
+ SET_DID_STATUS(rpl->r.result, DID_OK);
+ rpl->r.reply_payload_rcv_len =
+ sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, purex->msgp,
+ purex->pur_info.pur_bytes_rcvd, 0);
+
+ /* data copy / passback completed - destroy enode */
+ qla_enode_free(vha, ptr);
+
+ return 0;
+}
+
+/* the qpair lock is assumed to be held */
+static int
+qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
+ struct qla_els_pt_arg *a)
+{
+ struct els_entry_24xx *els_iocb;
+
+ els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
+ if (!els_iocb) {
+ ql_log(ql_log_warn, vha, 0x700c,
+ "qla2x00_alloc_iocbs failed.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ qla_els_pt_iocb(vha, els_iocb, a);
+
+ ql_dbg(ql_dbg_edif, vha, 0x0183,
+ "Sending ELS reject...\n");
+ ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
+ vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
+ /* flush iocb to mem before notifying hw doorbell */
+ wmb();
+ qla2x00_start_iocbs(vha, qp->req);
+ return 0;
+}
+
+void
+qla_edb_init(scsi_qla_host_t *vha)
+{
+ if (vha->e_dbell.db_flags == EDB_ACTIVE) {
+ /* list already init'd - error */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "edif db already initialized, cannot reinit\n");
+ return;
+ }
+
+ /* initialize lock which protects doorbell & init list */
+ spin_lock_init(&vha->e_dbell.db_lock);
+ INIT_LIST_HEAD(&vha->e_dbell.head);
+
+ /* create and initialize doorbell */
+ init_completion(&vha->e_dbell.dbell);
+}
+
+static void
+qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
+{
+ /*
+ * releases the space held by this edb node entry
+ * this function does _not_ free the edb node itself
+ * NB: the edb node entry passed should not be on any list
+ *
+ * currently the doorbell needs no additional cleanup,
+ * but this is kept as a placeholder for future use.
+ */
+
+ if (!node) {
+ ql_dbg(ql_dbg_edif, vha, 0x09122,
+ "%s error - no valid node passed\n", __func__);
+ return;
+ }
+
+ node->ntype = N_UNDEF;
+}
+
+/* function called when app is stopping */
+
+void
+qla_edb_stop(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *node, *q;
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ return;
+ }
+
+ /* grab lock so list doesn't move */
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */
+ /* hopefully this is a null list at this point */
+ list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
+ ql_dbg(ql_dbg_edif, vha, 0x910f,
+ "%s freeing edb_node type=%x\n",
+ __func__, node->ntype);
+ qla_edb_node_free(vha, node);
+ list_del(&node->list);
+
+ kfree(node);
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ /* wake up doorbell waiters - they'll be dismissed with error code */
+ complete_all(&vha->e_dbell.dbell);
+}
+
+static struct edb_node *
+qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
+{
+ struct edb_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node) {
+ /* couldn't get space */
+ ql_dbg(ql_dbg_edif, vha, 0x9100,
+ "edb node unable to be allocated\n");
+ return NULL;
+ }
+
+ node->ntype = ntype;
+ INIT_LIST_HEAD(&node->list);
+ return node;
+}
+
+/* adds an already allocated edb node to the doorbell list */
+static bool
+qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
+{
+ unsigned long flags;
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ return false;
+ }
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ list_add_tail(&ptr->list, &vha->e_dbell.head);
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ /* ring doorbell for waiters */
+ complete(&vha->e_dbell.dbell);
+
+ return true;
+}
+
+/* adds event to doorbell list */
+void
+qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
+ uint32_t data, uint32_t data2, fc_port_t *sfcport)
+{
+ struct edb_node *edbnode;
+ fc_port_t *fcport = sfcport;
+ port_id_t id;
+
+ if (!vha->hw->flags.edif_enabled) {
+ /* edif not enabled */
+ return;
+ }
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ if (fcport)
+ fcport->edif.auth_state = dbtype;
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled (type=%d\n", __func__, dbtype);
+ return;
+ }
+
+ edbnode = qla_edb_node_alloc(vha, dbtype);
+ if (!edbnode) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to alloc db node\n", __func__);
+ return;
+ }
+
+ if (!fcport) {
+ id.b.domain = (data >> 16) & 0xff;
+ id.b.area = (data >> 8) & 0xff;
+ id.b.al_pa = data & 0xff;
+ ql_dbg(ql_dbg_edif, vha, 0x09222,
+ "%s: Arrived s_id: %06x\n", __func__,
+ id.b24);
+ fcport = qla2x00_find_fcport_by_pid(vha, &id);
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s can't find fcport for sid= 0x%x - ignoring\n",
+ __func__, id.b24);
+ kfree(edbnode);
+ return;
+ }
+ }
+
+ /* populate the edb node */
+ switch (dbtype) {
+ case VND_CMD_AUTH_STATE_NEEDED:
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ edbnode->u.plogi_did.b24 = fcport->d_id.b24;
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ edbnode->u.els_sid.b24 = fcport->d_id.b24;
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ edbnode->u.sa_aen.port_id = fcport->d_id;
+ edbnode->u.sa_aen.status = data;
+ edbnode->u.sa_aen.key_type = data2;
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unknown type: %x\n", __func__, dbtype);
+ qla_edb_node_free(vha, edbnode);
+ kfree(edbnode);
+ edbnode = NULL;
+ break;
+ }
+
+ if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to add dbnode\n", __func__);
+ qla_edb_node_free(vha, edbnode);
+ kfree(edbnode);
+ return;
+ }
+ if (edbnode && fcport)
+ fcport->edif.auth_state = dbtype;
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
+}
+
+static struct edb_node *
+qla_edb_getnext(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *edbnode = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ /* db nodes are fifo - no qualifications done */
+ if (!list_empty(&vha->e_dbell.head)) {
+ edbnode = list_first_entry(&vha->e_dbell.head,
+ struct edb_node, list);
+ list_del(&edbnode->list);
+ }
+
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ return edbnode;
+}
+
+/*
+ * app uses separate thread to read this. It'll wait until the doorbell
+ * is rung by the driver or the max wait time has expired
+ */
+ssize_t
+edif_doorbell_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct edb_node *dbnode = NULL;
+ struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
+ uint32_t dat_siz, buf_size, sz;
+
+ /* TODO: app currently hardcoded to 256. Will transition to bsg */
+ sz = 256;
+
+ /* stop new threads from waiting if we're not init'd */
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
+ "%s error - edif db not enabled\n", __func__);
+ return 0;
+ }
+
+ if (!vha->hw->flags.edif_enabled) {
+ /* edif not enabled */
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
+ "%s error - edif not enabled\n", __func__);
+ return -1;
+ }
+
+ buf_size = 0;
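+ /* drain the doorbell fifo, packing as many events as fit in the app's buffer */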
+ while ((sz - buf_size) >= sizeof(struct edb_node)) {
+ /* remove the next item from the doorbell list */
+ dat_siz = 0;
+ dbnode = qla_edb_getnext(vha);
+ if (dbnode) {
+ ap->event_code = dbnode->ntype;
+ switch (dbnode->ntype) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ case VND_CMD_AUTH_STATE_NEEDED:
+ ap->port_id = dbnode->u.plogi_did;
+ dat_siz += sizeof(ap->port_id);
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ ap->port_id = dbnode->u.els_sid;
+ dat_siz += sizeof(ap->port_id);
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ ap->port_id = dbnode->u.sa_aen.port_id;
+ memcpy(ap->event_data, &dbnode->u,
+ sizeof(struct edif_sa_update_aen));
+ dat_siz += sizeof(struct edif_sa_update_aen);
+ break;
+ default:
+ /* unknown node type, rtn unknown ntype */
+ ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
+ memcpy(ap->event_data, &dbnode->ntype, 4);
+ dat_siz += 4;
+ break;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell consumed : type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+ /* we're done with the db node, so free it up */
+ qla_edb_node_free(vha, dbnode);
+ kfree(dbnode);
+ } else {
+ break;
+ }
+
+ ap->event_data_size = dat_siz;
+ /* 8bytes = ap->event_code + ap->event_data_size */
+ buf_size += dat_siz + 8;
+ ap = (struct edif_app_dbell *)(buf + buf_size);
+ }
+ return buf_size;
+}
+
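+/* completion handler for fire-and-forget SA replace srbs: just release the srb */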
+static void qla_noop_sp_done(srb_t *sp, int res)
+{
+ sp->free(sp);
+}
+
+/*
+ * Called from work queue
+ * build and send the sa_update iocb to delete an rx sa_index
+ */
+int
+qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
+{
+ srb_t *sp;
+ fc_port_t *fcport = NULL;
+ struct srb_iocb *iocb_cmd = NULL;
+ int rval = QLA_SUCCESS;
+ struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
+ uint16_t nport_handle = e->u.sa_update.nport_handle;
+
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "%s: starting, sa_ctl: %p\n", __func__, sa_ctl);
+
+ if (!sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "sa_ctl allocation failed\n");
+ return -ENOMEM;
+ }
+
+ fcport = sa_ctl->fcport;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "SRB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ iocb_cmd = &sp->u.iocb_cmd;
+ iocb_cmd->u.sa_update.sa_ctl = sa_ctl;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3073,
+ "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
+ fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);
+ /*
+ * if this is a sadb cleanup delete, mark it so the isr can
+ * take the correct action
+ */
+ if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
+ /* mark this srb as a cleanup delete */
+ sp->flags |= SRB_EDIF_CLEANUP_DELETE;
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
+ }
+
+ sp->type = SRB_SA_REPLACE;
+ sp->name = "SA_REPLACE";
+ sp->fcport = fcport;
+ sp->free = qla2x00_rel_sp;
+ sp->done = qla_noop_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+
+ if (rval != QLA_SUCCESS)
+ rval = QLA_FUNCTION_FAILED;
+
+ return rval;
+}
+
+void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
+{
+ int itr = 0;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_sa_update_frame *sa_frame =
+ &sp->u.iocb_cmd.u.sa_update.sa_frame;
+ u8 flags = 0;
+
+ switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
+ case 0:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ break;
+ case 1:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ flags |= SA_FLAG_INVALIDATE;
+ break;
+ case 2:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ flags |= SA_FLAG_TX;
+ break;
+ case 3:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
+ break;
+ }
+
+ sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
+ sa_update_iocb->entry_count = 1;
+ sa_update_iocb->sys_define = 0;
+ sa_update_iocb->entry_status = 0;
+ sa_update_iocb->handle = sp->handle;
+ sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
+ sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+
+ sa_update_iocb->flags = flags;
+ sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
+ sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
+ sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);
+
+ sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
+ if (sp->fcport->edif.aes_gmac)
+ sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;
+
+ if (sa_frame->flags & SAU_FLG_KEY256) {
+ sa_update_iocb->sa_control |= SA_CNTL_KEY256;
+ for (itr = 0; itr < 32; itr++)
+ sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
+ } else {
+ sa_update_iocb->sa_control |= SA_CNTL_KEY128;
+ for (itr = 0; itr < 16; itr++)
+ sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x921d,
+ "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
+ __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
+ sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
+ sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
+ sp->fcport->edif.aes_gmac);
+
+ if (sa_frame->flags & SAU_FLG_TX)
+ sp->fcport->edif.tx_sa_pending = 1;
+ else
+ sp->fcport->edif.rx_sa_pending = 1;
+
+ sp->fcport->vha->qla_stats.control_requests++;
+}
+
+void
+qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
+{
+ struct scsi_qla_host *vha = sp->vha;
+ struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
+ struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl;
+ uint16_t nport_handle = sp->fcport->loop_id;
+
+ sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
+ sa_update_iocb->entry_count = 1;
+ sa_update_iocb->sys_define = 0;
+ sa_update_iocb->entry_status = 0;
+ sa_update_iocb->handle = sp->handle;
+
+ sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);
+
+ sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
+ sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+
+ /* Invalidate the index. salt, spi, control & key are ignored */
+ sa_update_iocb->flags = SA_FLAG_INVALIDATE;
+ sa_update_iocb->salt = 0;
+ sa_update_iocb->spi = 0;
+ sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
+ sa_update_iocb->sa_control = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x921d,
+ "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
+ __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
+ sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
+ sa_update_iocb->sa_index, sp->handle);
+
+ sp->fcport->vha->qla_stats.control_requests++;
+}
+
+void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
+{
+ struct purex_entry_24xx *p = *pkt;
+ struct enode *ptr;
+ int sid;
+ u16 totlen;
+ struct purexevent *purex;
+ struct scsi_qla_host *host = NULL;
+ int rc;
+ struct fc_port *fcport;
+ struct qla_els_pt_arg a;
+ be_id_t beid;
+
+ memset(&a, 0, sizeof(a));
+
+ a.els_opcode = ELS_AUTH_ELS;
+ a.nport_handle = p->nport_handle;
+ a.rx_xchg_address = p->rx_xchg_addr;
+ a.did.b.domain = p->s_id[2];
+ a.did.b.area = p->s_id[1];
+ a.did.b.al_pa = p->s_id[0];
+ a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
+ a.tx_addr = vha->hw->elsrej.cdma;
+ a.vp_idx = vha->vp_idx;
+ a.control_flags = EPD_ELS_RJT;
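+ /* the els_pt_arg is pre-built as an LS_RJT so any early exit below can reject the exchange immediately */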
+
+ sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);
+
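+ /* payload length: frame size (low 12 bits) minus the ELS header */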
+ totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
+ if (le16_to_cpu(p->status_flags) & 0x8000) {
+ totlen = le16_to_cpu(p->trunc_frame_size);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ if (totlen > MAX_PAYLOAD) {
+ ql_dbg(ql_dbg_edif, vha, 0x0910d,
+ "%s WARNING: verbose ELS frame received (totlen=%x)\n",
+ __func__, totlen);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ if (!vha->hw->flags.edif_enabled) {
+ /* edif support not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
+ __func__);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ ptr = qla_enode_alloc(vha, N_PUREX);
+ if (!ptr) {
+ ql_dbg(ql_dbg_edif, vha, 0x09109,
+ "WARNING: enode alloc failed for sid=%x\n",
+ sid);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ purex = &ptr->u.purexinfo;
+ purex->pur_info.pur_sid = a.did;
+ purex->pur_info.pur_pend = 0;
+ purex->pur_info.pur_bytes_rcvd = totlen;
+ purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
+ purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
+ purex->pur_info.pur_did.b.domain = p->d_id[2];
+ purex->pur_info.pur_did.b.area = p->d_id[1];
+ purex->pur_info.pur_did.b.al_pa = p->d_id[0];
+ purex->pur_info.vp_idx = p->vp_idx;
+
+ rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
+ purex->msgp_len);
+ if (rc) {
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ qla_enode_free(vha, ptr);
+ return;
+ }
+ beid.al_pa = purex->pur_info.pur_did.b.al_pa;
+ beid.area = purex->pur_info.pur_did.b.area;
+ beid.domain = purex->pur_info.pur_did.b.domain;
+ host = qla_find_host_by_d_id(vha, beid);
+ if (!host) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "%s Drop ELS due to unable to find host %06x\n",
+ __func__, purex->pur_info.pur_did.b24);
+
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ qla_enode_free(vha, ptr);
+ return;
+ }
+
+ fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
+
+ if (host->e_dbell.db_flags != EDB_ACTIVE ||
+ (fcport && fcport->loop_id == FC_NO_LOOP_ID)) {
+ ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
+ __func__, host->e_dbell.db_flags,
+ fcport ? fcport->d_id.b24 : 0);
+
+ qla_els_reject_iocb(host, (*rsp)->qpair, &a);
+ qla_enode_free(host, ptr);
+ return;
+ }
+
+ /* add the local enode to the list */
+ qla_enode_add(host, ptr);
+
+ ql_dbg(ql_dbg_edif, host, 0x0910c,
+ "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
+ __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
+ purex->pur_info.pur_did.b24, p->rx_xchg_addr);
+
+ qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
+}
+
+static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ void *sa_id_map;
+ unsigned long flags = 0;
+ u16 sa_index;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: entry\n", __func__);
+
+ if (dir)
+ sa_id_map = ha->edif_tx_sa_id_map;
+ else
+ sa_id_map = ha->edif_rx_sa_id_map;
+
+ spin_lock_irqsave(&ha->sadb_fp_lock, flags);
+ sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
+ if (sa_index >= EDIF_NUM_SA_INDEX) {
+ spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+ return INVALID_EDIF_SA_INDEX;
+ }
+ set_bit(sa_index, sa_id_map);
+ spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+
+ if (dir)
+ sa_index += EDIF_TX_SA_INDEX_BASE;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: index retrieved from free pool %d\n", __func__, sa_index);
+
+ return sa_index;
+}
+
+/* find an sadb entry for an nport_handle */
+static struct edif_sa_index_entry *
+qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
+ struct list_head *sa_list)
+{
+ struct edif_sa_index_entry *entry;
+ struct edif_sa_index_entry *tentry;
+ struct list_head *indx_list = sa_list;
+
+ list_for_each_entry_safe(entry, tentry, indx_list, next) {
+ if (entry->handle == nport_handle)
+ return entry;
+ }
+ return NULL;
+}
+
+/* remove an sa_index from the nport_handle and return it to the free pool */
+static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
+ uint16_t sa_index)
+{
+ struct edif_sa_index_entry *entry;
+ struct list_head *sa_list;
+ int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
+ int slot = 0;
+ int free_slot_count = 0;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: entry\n", __func__);
+
+ if (dir)
+ sa_list = &ha->sadb_tx_index_list;
+ else
+ sa_list = &ha->sadb_rx_index_list;
+
+ entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
+ if (!entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: no entry found for nport_handle 0x%x\n",
+ __func__, nport_handle);
+ return -1;
+ }
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ /*
+ * each tx/rx direction has up to 2 sa indexes/slots: one slot for
+ * in-flight traffic, the other is used at re-key time.
+ */
+ for (slot = 0; slot < 2; slot++) {
+ if (entry->sa_pair[slot].sa_index == sa_index) {
+ entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
+ entry->sa_pair[slot].spi = 0;
+ free_slot_count++;
+ qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
+ } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
+ free_slot_count++;
+ }
+ }
+
+ if (free_slot_count == 2) {
+ list_del(&entry->next);
+ kfree(entry);
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_index %d removed, free_slot_count: %d\n",
+ __func__, sa_index, free_slot_count);
+
+ return 0;
+}
+
+void
+qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
+ struct sa_update_28xx *pkt)
+{
+ const char *func = "SA_UPDATE_RESPONSE_IOCB";
+ srb_t *sp;
+ struct edif_sa_ctl *sa_ctl;
+ int old_sa_deleted = 1;
+ uint16_t nport_handle;
+ struct scsi_qla_host *vha;
+
+ sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
+
+ if (!sp) {
+ ql_dbg(ql_dbg_edif, v, 0x3063,
+ "%s: no sp found for pkt\n", __func__);
+ return;
+ }
+ /* use sp->vha due to npiv */
+ vha = sp->vha;
+
+ switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
+ case 0:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ case 1:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ case 2:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ case 3:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ }
+
+ /*
+ * dig the nport handle out of the iocb; fcport->loop_id cannot be
+ * trusted to be correct during cleanup sa_update iocbs.
+ */
+ nport_handle = sp->fcport->loop_id;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
+ __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
+ nport_handle, pkt->sa_index, pkt->flags, sp->handle);
+
+ /* if rx delete, remove the timer */
+ if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) {
+ struct edif_list_entry *edif_entry;
+
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
+ edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
+ if (edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: removing edif_entry %p, new sa_index: 0x%x\n",
+ __func__, edif_entry, pkt->sa_index);
+ qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
+ del_timer(&edif_entry->timer);
+
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
+ __func__, edif_entry, pkt->sa_index);
+
+ kfree(edif_entry);
+ }
+ }
+
+ /*
+ * if this is a delete for either tx or rx, make sure it succeeded.
+ * The new_sa_info field should be 0xffff on success
+ */
+ if (pkt->flags & SA_FLAG_INVALIDATE)
+ old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;
+
+ /* Process update and delete the same way */
+
+ /* If this is an sadb cleanup delete, bypass sending events to IPSEC */
+ if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: nph 0x%x, sa_index %d removed from fw\n",
+ __func__, sp->fcport->loop_id, pkt->sa_index);
+
+ } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
+ old_sa_deleted) {
+ /*
+ * Note: we are only keeping track of the latest SA,
+ * so we know when we can start enabling encryption per I/O.
+ * If all SAs get deleted, let FW reject the IOCB.
+ *
+ * TODO: edif: don't set enabled here I think
+ * TODO: edif: prli complete is where it should be set
+ */
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "SA(%x)updated for s_id %02x%02x%02x\n",
+ pkt->new_sa_info,
+ pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
+ sp->fcport->edif.enable = 1;
+ if (pkt->flags & SA_FLAG_TX) {
+ sp->fcport->edif.tx_sa_set = 1;
+ sp->fcport->edif.tx_sa_pending = 0;
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_TX_SA_KEY, sp->fcport);
+ } else {
+ sp->fcport->edif.rx_sa_set = 1;
+ sp->fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, sp->fcport);
+ }
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
+ __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
+ pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
+
+ if (pkt->flags & SA_FLAG_TX)
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
+ QL_VND_TX_SA_KEY, sp->fcport);
+ else
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
+ QL_VND_RX_SA_KEY, sp->fcport);
+ }
+
+ /* for delete, release sa_ctl, sa_index */
+ if (pkt->flags & SA_FLAG_INVALIDATE) {
+ /* release the sa_ctl */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
+ le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
+ if (sa_ctl &&
+ qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
+ (pkt->flags & SA_FLAG_TX)) != NULL) {
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: freeing sa_ctl for index %d\n",
+ __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl NOT freed, sa_ctl: %p\n",
+ __func__, sa_ctl);
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, le16_to_cpu(pkt->sa_index), nport_handle);
+ qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
+ le16_to_cpu(pkt->sa_index));
+ /*
+ * check for a failed sa_update and remove
+ * the sadb entry.
+ */
+ } else if (pkt->u.comp_sts) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, pkt->sa_index, nport_handle);
+ qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
+ le16_to_cpu(pkt->sa_index));
+ }
+
+ sp->done(sp, 0);
+}
+
+/**
+ * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Return: non-zero if a failure occurred, else zero.
+ */
+int
+qla28xx_start_scsi_edif(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index, i;
+ uint32_t handle;
+ uint16_t cnt;
+ int16_t req_cnt;
+ uint16_t tot_dsds;
+ __be32 *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_6 *cmd_pkt;
+ struct dsd64 *cur_dsd;
+ uint8_t avail_dsds = 0;
+ struct scatterlist *sg;
+ struct req_que *req = sp->qpair->req;
+ spinlock_t *lock = sp->qpair->qp_lock_ptr;
+
+ /* Setup device pointers. */
+ cmd = GET_CMD_SP(sp);
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else {
+ nseg = 0;
+ }
+
+ tot_dsds = nseg;
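+ /* make sure the request ring has room for the command plus its continuation IOCBs */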
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ rd_reg_dword(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ ctx = sp->u.scmd.ct6_ctx =
+ mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!ctx) {
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3011,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /*
+ * a SCSI command longer than 16 bytes must be
+ * a multiple of 4
+ */
+ ql_log(ql_log_warn, vha, 0x3012,
+ "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+ cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = make_handle(req->id, handle);
+
+ /*
+ * Zero out remaining portion of packet.
+ * tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = cpu_to_le32(0);
+ goto no_dsds;
+ }
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
+ sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
+ sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
+ }
+
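+ /* flag the exchange as edif-protected; never request a new SA from the I/O path */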
+ cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+ cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));
+
+ /* One DSD is available in the Command Type 6 IOCB */
+ avail_dsds = 1;
+ cur_dsd = &cmd_pkt->fcp_dsd;
+
+ /* Load data segments */
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
+ cur_dsd = cont_pkt->dsd;
+ avail_dsds = 5;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ put_unaligned_le64(sle_dma, &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
+ cur_dsd++;
+ avail_dsds--;
+ }
+
+no_dsds:
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
+
+ cmd_pkt->entry_type = COMMAND_TYPE_6;
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
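+ /* fcp_dl (FCP data length) immediately follows the CDB in the FCP_CMND IU */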
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ cmd_pkt->entry_status = 0;
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Adjust ring index. */
+ wmb();
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ sp->qpair->cmd_cnt++;
+ /* Set chip new ring index. */
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->u.scmd.ct6_ctx) {
+ mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
+ sp->u.scmd.ct6_ctx = NULL;
+ }
+ spin_unlock_irqrestore(lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+/**********************************************
+ * edif update/delete sa_index list functions *
+ **********************************************/
+
+/* clear the edif_indx_list for this port */
+void qla_edif_list_del(fc_port_t *fcport)
+{
+ struct edif_list_entry *indx_lst;
+ struct edif_list_entry *tindx_lst;
+ struct list_head *indx_list = &fcport->edif.edif_indx_list;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
+ list_del(&indx_lst->next);
+ kfree(indx_lst);
+ }
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+}
+
+/******************
+ * SADB functions *
+ ******************/
+
+/* allocate/retrieve an sa_index for a given spi */
+static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
+ struct qla_sa_update_frame *sa_frame)
+{
+ struct edif_sa_index_entry *entry;
+ struct list_head *sa_list;
+ uint16_t sa_index;
+ int dir = sa_frame->flags & SAU_FLG_TX;
+ int slot = 0;
+ int free_slot = -1;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+ uint16_t nport_handle = fcport->loop_id;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: entry fc_port: %p, nport_handle: 0x%x\n",
+ __func__, fcport, nport_handle);
+
+ if (dir)
+ sa_list = &ha->sadb_tx_index_list;
+ else
+ sa_list = &ha->sadb_rx_index_list;
+
+ entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
+ if (!entry) {
+ if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: rx delete request with no entry\n", __func__);
+ return RX_DELETE_NO_EDIF_SA_INDEX;
+ }
+
+ /* if there is no entry for this nport, add one */
+ entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
+ if (!entry)
+ return INVALID_EDIF_SA_INDEX;
+
+ sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+ if (sa_index == INVALID_EDIF_SA_INDEX) {
+ kfree(entry);
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ INIT_LIST_HEAD(&entry->next);
+ entry->handle = nport_handle;
+ entry->fcport = fcport;
+ entry->sa_pair[0].spi = sa_frame->spi;
+ entry->sa_pair[0].sa_index = sa_index;
+ entry->sa_pair[1].spi = 0;
+ entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ list_add_tail(&entry->next, sa_list);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
+ __func__, nport_handle, sa_frame->spi, sa_index);
+
+ return sa_index;
+ }
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+
+ /* see if we already have an entry for this spi */
+ for (slot = 0; slot < 2; slot++) {
+ if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
+ free_slot = slot;
+ } else {
+ if (entry->sa_pair[slot].spi == sa_frame->spi) {
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
+ __func__, slot, entry->handle, sa_frame->spi,
+ entry->sa_pair[slot].sa_index);
+ return entry->sa_pair[slot].sa_index;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ /* both slots are used */
+ if (free_slot == -1) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
+ __func__, entry->handle, sa_frame->spi);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n",
+ __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
+ entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);
+
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ /* there is at least one free slot, use it */
+ sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+ if (sa_index == INVALID_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+ "%s: empty freepool!!\n", __func__);
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ entry->sa_pair[free_slot].spi = sa_frame->spi;
+ entry->sa_pair[free_slot].sa_index = sa_index;
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+ "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
+ __func__, free_slot, entry->handle, sa_frame->spi, sa_index);
+
+ return sa_index;
+}
+
+/* release any sadb entries -- only done at teardown */
+void qla_edif_sadb_release(struct qla_hw_data *ha)
+{
+ struct list_head *pos;
+ struct list_head *tmp;
+ struct edif_sa_index_entry *entry;
+
+ list_for_each_safe(pos, tmp, &ha->sadb_rx_index_list) {
+ entry = list_entry(pos, struct edif_sa_index_entry, next);
+ list_del(&entry->next);
+ kfree(entry);
+ }
+
+ list_for_each_safe(pos, tmp, &ha->sadb_tx_index_list) {
+ entry = list_entry(pos, struct edif_sa_index_entry, next);
+ list_del(&entry->next);
+ kfree(entry);
+ }
+}
+
+/**************************
+ * sadb freepool functions
+ **************************/
+
+/* build the rx and tx sa_index free pools -- only done at fcport init */
+int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
+{
+ ha->edif_tx_sa_id_map =
+ kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
+
+ if (!ha->edif_tx_sa_id_map) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
+ "Unable to allocate memory for sadb tx.\n");
+ return -ENOMEM;
+ }
+
+ ha->edif_rx_sa_id_map =
+ kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
+ if (!ha->edif_rx_sa_id_map) {
+ kfree(ha->edif_tx_sa_id_map);
+ ha->edif_tx_sa_id_map = NULL;
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
+ "Unable to allocate memory for sadb rx.\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* release the free pool - only done during fcport teardown */
+void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
+{
+ kfree(ha->edif_tx_sa_id_map);
+ ha->edif_tx_sa_id_map = NULL;
+ kfree(ha->edif_rx_sa_id_map);
+ ha->edif_rx_sa_id_map = NULL;
+}
+
+static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+ fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
+{
+ struct edif_list_entry *edif_entry;
+ struct edif_sa_ctl *sa_ctl;
+ uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
+ unsigned long flags = 0;
+ uint16_t nport_handle = fcport->loop_id;
+ uint16_t cached_nport_handle;
+
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
+ if (!edif_entry) {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return; /* no pending delete for this handle */
+ }
+
+ /*
+ * bail out if there is no pending delete for this index or the
+ * iocb does not match the rx sa_index
+ */
+ if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
+ edif_entry->update_sa_index != sa_index) {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return;
+ }
+
+ /*
+ * wait until we have seen at least EDIF_RX_DELETE_FILTER_COUNT
+ * transfers before queueing the RX delete
+ */
+ if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);
+
+ delete_sa_index = edif_entry->delete_sa_index;
+ edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+ cached_nport_handle = edif_entry->handle;
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+
+ /* sanity check on the nport handle */
+ if (nport_handle != cached_nport_handle) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
+ __func__, nport_handle, cached_nport_handle);
+ }
+
+ /* find the sa_ctl for the delete and schedule the delete */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
+ if (sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
+ __func__, sa_ctl, sa_index);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
+ delete_sa_index,
+ edif_entry->update_sa_index, nport_handle, handle);
+
+ sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
+ set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
+ qla_post_sa_replace_work(fcport->vha, fcport,
+ nport_handle, sa_ctl);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
+ __func__, delete_sa_index);
+ }
+}
+
+void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+ srb_t *sp, struct sts_entry_24xx *sts24)
+{
+ fc_port_t *fcport = sp->fcport;
+ /* sa_index used by this iocb */
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ uint32_t handle;
+
+ handle = (uint32_t)LSW(sts24->handle);
+
+ /* find out if this status iocb is for a scsi read */
+ if (cmd->sc_data_direction != DMA_FROM_DEVICE)
+ return;
+
+ return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
+ le16_to_cpu(sts24->edif_sa_index));
+}
+
+void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct ctio7_from_24xx *pkt)
+{
+ __chk_edif_rx_sa_delete_pending(vha, fcport,
+ pkt->handle, le16_to_cpu(pkt->edif_sa_index));
+}
+
+static void qla_parse_auth_els_ctl(struct srb *sp)
+{
+ struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
+ struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
+ struct fc_bsg_request *request = bsg_job->request;
+ struct qla_bsg_auth_els_request *p =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ a->tx_len = a->tx_byte_count = sp->remap.req.len;
+ a->tx_addr = sp->remap.req.dma;
+ a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
+ a->rx_addr = sp->remap.rsp.dma;
+
+ if (p->e.sub_cmd == SEND_ELS_REPLY) {
+ a->control_flags = p->e.extra_control_flags << 13;
+ a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
+ if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
+ a->els_opcode = ELS_LS_ACC;
+ else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
+ a->els_opcode = ELS_LS_RJT;
+ }
+ a->did = sp->fcport->d_id;
+ a->els_opcode = request->rqst_data.h_els.command_code;
+ a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ a->vp_idx = sp->vha->vp_idx;
+}
+
+int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ int rval = (DID_ERROR << 16);
+ port_id_t d_id;
+ struct qla_bsg_auth_els_request *p =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
+ d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
+ d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];
+
+ /* find matching d_id in fcport list */
+ fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x911a,
+ "%s fcport not find online portid=%06x.\n",
+ __func__, d_id.b24);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ return -EIO;
+ }
+
+ if (qla_bsg_check(vha, bsg_job, fcport))
+ return 0;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x910d,
+ "%s ELS code %x, no loop id.\n", __func__,
+ bsg_request->rqst_data.r_els.els_code);
+ SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+ return -ENXIO;
+ }
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
+ SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+ rval = -EIO;
+ goto done;
+ }
+
+ /* pass through is supported only for ISP 4Gb or higher */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ ql_dbg(ql_dbg_user, vha, 0x7001,
+ "ELS passthru not supported for ISP23xx based adapters.\n");
+ SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+ rval = -EPERM;
+ goto done;
+ }
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_dbg(ql_dbg_user, vha, 0x7004,
+ "Failed get sp pid=%06x\n", fcport->d_id.b24);
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done;
+ }
+
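+ /* stage the ELS request/response payloads in DMA-able buffers for the passthrough IOCB */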
+ sp->remap.req.len = bsg_job->request_payload.payload_len;
+ sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
+ GFP_KERNEL, &sp->remap.req.dma);
+ if (!sp->remap.req.buf) {
+ ql_dbg(ql_dbg_user, vha, 0x7005,
+ "Failed allocate request dma len=%x\n",
+ bsg_job->request_payload.payload_len);
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done_free_sp;
+ }
+
+ sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
+ sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
+ GFP_KERNEL, &sp->remap.rsp.dma);
+ if (!sp->remap.rsp.buf) {
+ ql_dbg(ql_dbg_user, vha, 0x7006,
+ "Failed allocate response dma len=%x\n",
+ bsg_job->reply_payload.payload_len);
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done_free_remap_req;
+ }
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
+ sp->remap.req.len);
+ sp->remap.remapped = true;
+
+ sp->type = SRB_ELS_CMD_HST_NOLOGIN;
+ sp->name = "SPCN_BSG_HST_NOLOGIN";
+ sp->u.bsg_cmd.bsg_job = bsg_job;
+ qla_parse_auth_els_ctl(sp);
+
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
+
+ rval = qla2x00_start_sp(sp);
+
+ ql_dbg(ql_dbg_edif, vha, 0x700a,
+ "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
+ p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+ sp->handle, sp->remap.req.len, bsg_job);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x700e,
+ "qla2x00_start_sp failed = %d\n", rval);
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ rval = -EIO;
+ goto done_free_remap_rsp;
+ }
+ return rval;
+
+done_free_remap_rsp:
+ dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
+ sp->remap.rsp.dma);
+done_free_remap_req:
+ dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
+ sp->remap.req.dma);
+done_free_sp:
+ qla2x00_rel_sp(sp);
+
+done:
+ return rval;
+}
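+
+/*
+ * Illustrative only, not part of the driver: an authentication daemon
+ * replying LS_ACC to a received AUTH ELS would fill the bsg request
+ * roughly as below before submitting it through the FC bsg node
+ * (field names from qla_edif_bsg.h; transport plumbing omitted):
+ *
+ *	struct qla_bsg_auth_els_request req = {};
+ *
+ *	req.r.msgcode = FC_BSG_HST_ELS_NOLOGIN;
+ *	req.e.sub_cmd = SEND_ELS_REPLY;
+ *	req.e.extra_control_flags = BSG_CTL_FLAG_LS_ACC;
+ *	req.e.extra_rx_xchg_address = rx_xchg_addr; // from the PUREX event
+ *
+ * qla_parse_auth_els_ctl() then shifts extra_control_flags into the
+ * ELS PT IOCB control flags (BSG_CTL_FLAG_LS_ACC << 13 == 0x2000).
+ */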
+
+void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
+{
+ if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) {
+ ql_dbg(ql_dbg_disc, vha, 0xf09c,
+ "%s: sess %8phN send port_offline event\n",
+ __func__, sess->port_name);
+ sess->edif.app_sess_online = 0;
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
+ sess->d_id.b24, 0, sess);
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
+ }
+}
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
new file mode 100644
index 000000000000..1cff02e5bd43
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (c) 2021 Marvell
+ */
+#ifndef __QLA_EDIF_H
+#define __QLA_EDIF_H
+
+struct qla_scsi_host;
+#define EDIF_APP_ID 0x73730001
+
+#define EDIF_MAX_INDEX 2048
+struct edif_sa_ctl {
+ struct list_head next;
+ uint16_t del_index;
+ uint16_t index;
+ uint16_t slot;
+ uint16_t flags;
+#define EDIF_SA_CTL_FLG_REPL BIT_0
+#define EDIF_SA_CTL_FLG_DEL BIT_1
+#define EDIF_SA_CTL_FLG_CLEANUP_DEL BIT_4
+ // Invalidate Index bit; mirrors QLA_SA_UPDATE_FLAGS_DELETE
+ unsigned long state;
+#define EDIF_SA_CTL_USED 1 /* Active Sa update */
+#define EDIF_SA_CTL_PEND 2 /* Waiting for slot */
+#define EDIF_SA_CTL_REPL 3 /* Active Replace and Delete */
+#define EDIF_SA_CTL_DEL 4 /* Delete Pending */
+ struct fc_port *fcport;
+ struct bsg_job *bsg_job;
+ struct qla_sa_update_frame sa_frame;
+};
+
+enum enode_flags_t {
+ ENODE_ACTIVE = 0x1,
+};
+
+struct pur_core {
+ enum enode_flags_t enode_flags;
+ spinlock_t pur_lock;
+ struct list_head head;
+};
+
+enum db_flags_t {
+ EDB_ACTIVE = 0x1,
+};
+
+struct edif_dbell {
+ enum db_flags_t db_flags;
+ spinlock_t db_lock;
+ struct list_head head;
+ struct completion dbell;
+};
+
+#define SA_UPDATE_IOCB_TYPE 0x71 /* Security Association Update IOCB entry */
+struct sa_update_28xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* IOCB System handle. */
+
+ union {
+ __le16 nport_handle; /* in: N_PORT handle. */
+ __le16 comp_sts; /* out: completion status */
+#define CS_PORT_EDIF_SUPP_NOT_RDY 0x64
+#define CS_PORT_EDIF_INV_REQ 0x66
+ } u;
+ uint8_t vp_index;
+ uint8_t reserved_1;
+ uint8_t port_id[3];
+ uint8_t flags;
+#define SA_FLAG_INVALIDATE BIT_0
+#define SA_FLAG_TX BIT_1 // 1=tx, 0=rx
+
+ uint8_t sa_key[32]; /* 256 bit key */
+ __le32 salt;
+ __le32 spi;
+ uint8_t sa_control;
+#define SA_CNTL_ENC_FCSP (1 << 3)
+#define SA_CNTL_ENC_OPD (2 << 3)
+#define SA_CNTL_ENC_MSK (3 << 3) // mask bits 4,3
+#define SA_CNTL_AES_GMAC (1 << 2)
+#define SA_CNTL_KEY256 (2 << 0)
+#define SA_CNTL_KEY128 0
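+/*
+ * Illustrative encoding: a 256-bit FC-SP encryption SA would set
+ * sa_control = SA_CNTL_ENC_FCSP | SA_CNTL_KEY256 (0x0a).
+ */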
+
+ uint8_t reserved_2;
+ __le16 sa_index; // reserve: bit 11-15
+ __le16 old_sa_info;
+ __le16 new_sa_info;
+};
+
+#define NUM_ENTRIES 256
+#define MAX_PAYLOAD 1024
+#define PUR_GET 1
+
+struct dinfo {
+ int nodecnt;
+ int lstate;
+};
+
+struct pur_ninfo {
+ unsigned int pur_pend:1;
+ port_id_t pur_sid;
+ port_id_t pur_did;
+ uint8_t vp_idx;
+ short pur_bytes_rcvd;
+ unsigned short pur_nphdl;
+ unsigned int pur_rx_xchg_address;
+};
+
+struct purexevent {
+ struct pur_ninfo pur_info;
+ unsigned char *msgp;
+ u32 msgp_len;
+};
+
+#define N_UNDEF 0
+#define N_PUREX 1
+struct enode {
+ struct list_head list;
+ struct dinfo dinfo;
+ uint32_t ntype;
+ union {
+ struct purexevent purexinfo;
+ } u;
+};
+#endif /* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
new file mode 100644
index 000000000000..58b718d35d19
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (C) 2018- Marvell
+ *
+ */
+#ifndef __QLA_EDIF_BSG_H
+#define __QLA_EDIF_BSG_H
+
+/* BSG Vendor specific commands */
+#define ELS_MAX_PAYLOAD 1024
+#ifndef WWN_SIZE
+#define WWN_SIZE 8
+#endif
+#define VND_CMD_APP_RESERVED_SIZE 32
+
+enum auth_els_sub_cmd {
+ SEND_ELS = 0,
+ SEND_ELS_REPLY,
+ PULL_ELS,
+};
+
+struct extra_auth_els {
+ enum auth_els_sub_cmd sub_cmd;
+ uint32_t extra_rx_xchg_address;
+ uint8_t extra_control_flags;
+#define BSG_CTL_FLAG_INIT 0
+#define BSG_CTL_FLAG_LS_ACC 1
+#define BSG_CTL_FLAG_LS_RJT 2
+#define BSG_CTL_FLAG_TRM 3
+ uint8_t extra_rsvd[3];
+} __packed;
+
+struct qla_bsg_auth_els_request {
+ struct fc_bsg_request r;
+ struct extra_auth_els e;
+};
+
+struct qla_bsg_auth_els_reply {
+ struct fc_bsg_reply r;
+ uint32_t rx_xchg_address;
+};
+
+struct app_id {
+ int app_vid;
+ uint8_t app_key[32];
+} __packed;
+
+struct app_start_reply {
+ uint32_t host_support_edif;
+ uint32_t edif_enode_active;
+ uint32_t edif_edb_active;
+ uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_start {
+ struct app_id app_info;
+ uint32_t prli_to;
+ uint32_t key_shred;
+ uint8_t app_start_flags;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE - 1];
+} __packed;
+
+struct app_stop {
+ struct app_id app_info;
+ char buf[16];
+} __packed;
+
+struct app_plogi_reply {
+ uint32_t prli_status;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+#define RECFG_TIME 1
+#define RECFG_BYTES 2
+
+struct app_rekey_cfg {
+ struct app_id app_info;
+ uint8_t rekey_mode;
+ port_id_t d_id;
+ uint8_t force;
+ union {
+ int64_t bytes;
+ int64_t time;
+ } rky_units;
+
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_pinfo_req {
+ struct app_id app_info;
+ uint8_t num_ports;
+ port_id_t remote_pid;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_pinfo {
+ port_id_t remote_pid;
+ uint8_t remote_wwpn[WWN_SIZE];
+ uint8_t remote_type;
+#define VND_CMD_RTYPE_UNKNOWN 0
+#define VND_CMD_RTYPE_TARGET 1
+#define VND_CMD_RTYPE_INITIATOR 2
+ uint8_t remote_state;
+ uint8_t auth_state;
+ uint8_t rekey_mode;
+ int64_t rekey_count;
+ int64_t rekey_config_value;
+ int64_t rekey_consumed_value;
+
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+/* AUTH States */
+#define VND_CMD_AUTH_STATE_UNDEF 0
+#define VND_CMD_AUTH_STATE_SESSION_SHUTDOWN 1
+#define VND_CMD_AUTH_STATE_NEEDED 2
+#define VND_CMD_AUTH_STATE_ELS_RCVD 3
+#define VND_CMD_AUTH_STATE_SAUPDATE_COMPL 4
+
+struct app_pinfo_reply {
+ uint8_t port_count;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ struct app_pinfo ports[0];
+} __packed;
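+/*
+ * Illustrative sizing for the trailing flexible array, e.g. for a
+ * reply carrying n ports:
+ *	kzalloc(sizeof(struct app_pinfo_reply) +
+ *		n * sizeof(struct app_pinfo), GFP_KERNEL);
+ */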
+
+struct app_sinfo_req {
+ struct app_id app_info;
+ uint8_t num_ports;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_sinfo {
+ uint8_t remote_wwpn[WWN_SIZE];
+ int64_t rekey_count;
+ uint8_t rekey_mode;
+ int64_t tx_bytes;
+ int64_t rx_bytes;
+} __packed;
+
+struct app_stats_reply {
+ uint8_t elem_count;
+ struct app_sinfo elem[0];
+} __packed;
+
+struct qla_sa_update_frame {
+ struct app_id app_info;
+ uint16_t flags;
+#define SAU_FLG_INV 0x01 /* delete key */
+#define SAU_FLG_TX 0x02 /* 1=tx, 0 = rx */
+#define SAU_FLG_FORCE_DELETE 0x08
+#define SAU_FLG_GMAC_MODE 0x20 /*
+ * GMAC mode is cleartext for the IO
+ * (i.e. NULL encryption)
+ */
+#define SAU_FLG_KEY128 0x40
+#define SAU_FLG_KEY256 0x80
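+/*
+ * Illustrative combination: deleting a 256-bit tx key would set
+ * flags = SAU_FLG_INV | SAU_FLG_TX | SAU_FLG_KEY256 (0x83).
+ */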
+ uint16_t fast_sa_index:10,
+ reserved:6;
+ uint32_t salt;
+ uint32_t spi;
+ uint8_t sa_key[32];
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ port_id_t port_id;
+} __packed;
+
+// used for edif mgmt bsg interface
+#define QL_VND_SC_UNDEF 0
+#define QL_VND_SC_SA_UPDATE 1
+#define QL_VND_SC_APP_START 2
+#define QL_VND_SC_APP_STOP 3
+#define QL_VND_SC_AUTH_OK 4
+#define QL_VND_SC_AUTH_FAIL 5
+#define QL_VND_SC_REKEY_CONFIG 6
+#define QL_VND_SC_GET_FCINFO 7
+#define QL_VND_SC_GET_STATS 8
+
+/* Application interface data structure for rtn data */
+#define EXT_DEF_EVENT_DATA_SIZE 64
+struct edif_app_dbell {
+ uint32_t event_code;
+ uint32_t event_data_size;
+ union {
+ port_id_t port_id;
+ uint8_t event_data[EXT_DEF_EVENT_DATA_SIZE];
+ };
+} __packed;
+
+struct edif_sa_update_aen {
+ port_id_t port_id;
+ uint32_t key_type; /* Rx (1) or Tx (2), per QL_VND_*_SA_KEY */
+ uint32_t status; /* 0 success, 1 failed, 2 timeout, 3 error */
+ uint8_t reserved[16];
+} __packed;
+
+#define QL_VND_SA_STAT_SUCCESS 0
+#define QL_VND_SA_STAT_FAILED 1
+#define QL_VND_SA_STAT_TIMEOUT 2
+#define QL_VND_SA_STAT_ERROR 3
+
+#define QL_VND_RX_SA_KEY 1
+#define QL_VND_TX_SA_KEY 2
+
+/* App defines for PLOGI auth'd-ok and PLOGI auth-failed requests */
+struct auth_complete_cmd {
+ struct app_id app_info;
+#define PL_TYPE_WWPN 1
+#define PL_TYPE_DID 2
+ uint32_t type;
+ union {
+ uint8_t wwpn[WWN_SIZE];
+ port_id_t d_id;
+ } u;
+ uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+#define RX_DELAY_DELETE_TIMEOUT 20
+
+#endif /* __QLA_EDIF_BSG_H */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 49df418030e4..c257af8d87fd 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -82,10 +82,11 @@ struct port_database_24xx {
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint8_t reserved_3[4];
+ uint8_t reserved_3[2];
+ uint16_t nvme_first_burst_size;
uint16_t prli_nvme_svc_param_word_0; /* Bits 15-0 of word 0 */
uint16_t prli_nvme_svc_param_word_3; /* Bits 15-0 of word 3 */
- uint16_t nvme_first_burst_size;
+ uint8_t secure_login;
uint8_t reserved_4[14];
};
@@ -489,6 +490,9 @@ struct cmd_type_6 {
struct scsi_lun lun; /* FCP LUN (BE). */
__le16 control_flags; /* Control flags. */
+#define CF_NEW_SA BIT_12
+#define CF_EN_EDIF BIT_9
+#define CF_ADDITIONAL_PARAM_BLK BIT_8
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
@@ -611,6 +615,7 @@ struct sts_entry_24xx {
union {
__le16 reserved_1;
__le16 nvme_rsp_pyld_len;
+ __le16 edif_sa_index; /* edif sa_index used for initiator read data */
};
__le16 state_flags; /* State flags. */
@@ -896,6 +901,7 @@ struct logio_entry_24xx {
#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */
#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */
+#define LCF_COMMON_FEAT BIT_7 /* PLOGI - Set Common Features Field */
#define LCF_EXPL_LOGO BIT_6 /* Perform an explicit LOGO. */
#define LCF_NVME_PRLI BIT_6 /* Perform NVME FC4 PRLI */
#define LCF_SKIP_PRLI BIT_5 /* Skip PRLI after PLOGI. */
@@ -920,6 +926,8 @@ struct logio_entry_24xx {
uint8_t rsp_size; /* Response size in 32bit words. */
__le32 io_parameter[11]; /* General I/O parameters. */
+#define LIO_COMM_FEAT_FCSP BIT_21
+#define LIO_COMM_FEAT_CIO BIT_31
#define LSC_SCODE_NOLINK 0x01
#define LSC_SCODE_NOIOCB 0x02
#define LSC_SCODE_NOXCB 0x03
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2f867da822ae..2b8bdb146a8f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -12,6 +12,7 @@
* Global Function Prototypes in qla_init.c source file.
*/
extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
+extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport);
extern int qla2100_pci_config(struct scsi_qla_host *);
extern int qla2300_pci_config(struct scsi_qla_host *);
@@ -130,6 +131,18 @@ void qla24xx_free_purex_item(struct purex_item *item);
extern bool qla24xx_risc_firmware_invalid(uint32_t *);
void qla_init_iocb_limit(scsi_qla_host_t *);
+void qla_edif_list_del(fc_port_t *fcport);
+void qla_edif_sadb_release(struct qla_hw_data *ha);
+int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha);
+void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha);
+void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+ srb_t *sp, struct sts_entry_24xx *sts24);
+void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct ctio7_from_24xx *ctio);
+void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport);
+int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob);
+void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
+const char *sc_to_str(uint16_t cmd);
/*
* Global Data in qla_os.c source file.
@@ -175,6 +188,7 @@ extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
extern int ql2xdifbundlinginternalbuffers;
extern int ql2xfulldump_on_mpifail;
+extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
@@ -236,6 +250,8 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
struct purex_item *pkt);
void qla_pci_set_eeh_busy(struct scsi_qla_host *);
void qla_schedule_eeh_work(struct scsi_qla_host *);
+struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *fcport,
+ int index, int dir);
/*
* Global Functions in qla_mid.c source file.
@@ -280,7 +296,10 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
*/
-
+void qla_els_pt_iocb(struct scsi_qla_host *vha,
+ struct els_entry_24xx *pkt, struct qla_els_pt_arg *a);
+cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha,
+ struct req_que *que);
extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -310,6 +329,8 @@ extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
struct dsd64 *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
+ struct qla_work_evt *e);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -578,6 +599,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
+void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -640,6 +662,8 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
+ struct rsp_que **rsp, u8 *buf, u32 buf_len);
/*
* Global Function Prototypes in qla_dbg.c source file.
@@ -879,6 +903,9 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
dma_addr_t, size_t, uint32_t);
extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
uint16_t *, uint16_t *);
+extern int qla24xx_sadb_update(struct bsg_job *bsg_job);
+extern int qla_post_sa_replace_work(struct scsi_qla_host *vha,
+ fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl);
/* 83xx related functions */
void qla83xx_fw_dump(scsi_qla_host_t *vha);
@@ -923,6 +950,7 @@ extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *);
extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *,
response_t *);
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha, be_id_t d_id);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
@@ -935,7 +963,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
-void qlt_remove_target_resources(struct qla_hw_data *);
+void qla_remove_hostmap(struct qla_hw_data *ha);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
@@ -950,6 +978,24 @@ extern void qla_nvme_abort_process_comp_status
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+
+/* qla_edif.c */
+fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id);
+void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2,
+ fc_port_t *fcport);
+void qla_edb_stop(scsi_qla_host_t *vha);
+ssize_t edif_doorbell_show(struct device *dev, struct device_attribute *attr, char *buf);
+int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job);
+void qla_enode_init(scsi_qla_host_t *vha);
+void qla_enode_stop(scsi_qla_host_t *vha);
+void qla_edif_flush_sa_ctl_lists(fc_port_t *fcport);
+void qla_edb_init(scsi_qla_host_t *vha);
+int qla28xx_start_scsi_edif(srb_t *sp);
+void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb);
+void qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb);
+void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp);
+void qla28xx_sa_update_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct sa_update_28xx *pkt);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#define QLA2XX_HW_ERROR BIT_0
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 5b6e04a91a18..b16b7d16be12 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -632,7 +632,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
- if (vha->flags.nvme_enabled)
+ if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha))
ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
@@ -2826,6 +2826,10 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (fcport->disc_state == DSC_DELETE_PEND)
return;
+ /* We will figure out what happened after AUTH completes */
+ if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
+ return;
+
if (ea->sp->gen2 != fcport->login_gen) {
/* target side must have changed it. */
ql_dbg(ql_dbg_disc, vha, 0x20d3,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f8f471157109..ad0d3f536a31 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -34,7 +34,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *);
static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
-static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
struct event_arg *ea);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
@@ -343,19 +342,28 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_login_sp_done;
- if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
+ if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
- else
- lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ } else {
+ if (vha->hw->flags.edif_enabled &&
+ vha->e_dbell.db_flags & EDB_ACTIVE) {
+ lio->u.logio.flags |=
+ (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
+ ql_dbg(ql_dbg_disc, vha, 0x2072,
+ "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n",
+ fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24);
+ } else {
+ lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ }
+ }
if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
ql_log(ql_log_warn, vha, 0x2072,
- "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x retries=%d.\n",
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->login_retry);
+ fcport->d_id.b24, fcport->login_retry);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -378,7 +386,7 @@ static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
sp->fcport->login_gen++;
- qlt_logo_completion_handler(sp->fcport, res);
+ qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
sp->free(sp);
}
@@ -404,10 +412,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->done = qla2x00_async_logout_sp_done;
ql_dbg(ql_dbg_disc, vha, 0x2070,
- "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
+ "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->port_name);
+ fcport->port_name, fcport->explicit_logout);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -692,11 +700,11 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+ "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, ea->rc,
fcport->login_gen, fcport->last_login_gen,
- fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+ fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
@@ -822,6 +830,13 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
qla2x00_post_async_adisc_work(vha, fcport,
data);
break;
+ case DSC_LS_PLOGI_COMP:
+ if (vha->hw->flags.edif_enabled) {
+ /* check to see if App support Secure */
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ break;
+ }
+ fallthrough;
case DSC_LS_PORT_UNAVAIL:
default:
if (fcport->loop_id == FC_NO_LOOP_ID) {
@@ -1191,7 +1206,7 @@ done:
sp->free(sp);
}
-static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_work_evt *e;
@@ -1418,6 +1433,60 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
+static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct port_database_24xx *pd)
+{
+ int rc = 0;
+
+ if (pd->secure_login) {
+ ql_dbg(ql_dbg_disc, vha, 0x104d,
+ "Secure Login established on %8phC\n",
+ fcport->port_name);
+ fcport->edif.secured_login = 1;
+ fcport->edif.non_secured_login = 0;
+ fcport->flags |= FCF_FCSP_DEVICE;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x104d,
+ "non-Secure Login %8phC",
+ fcport->port_name);
+ fcport->edif.secured_login = 0;
+ fcport->edif.non_secured_login = 1;
+ }
+ if (vha->hw->flags.edif_enabled) {
+ if (fcport->flags & FCF_FCSP_DEVICE) {
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
+ /* Start edif prli timer & ring doorbell for app */
+ fcport->edif.rx_sa_set = 0;
+ fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = 0;
+ fcport->edif.tx_sa_pending = 0;
+
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ fcport->d_id.b24);
+
+ if (vha->e_dbell.db_flags == EDB_ACTIVE) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ef,
+ "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->edif.app_started = 1;
+ fcport->edif.app_sess_online = 1;
+
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
+ fcport->d_id.b24, 0, fcport);
+ }
+
+ rc = 1;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2117,
+ "%s %d %8phC post prli\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ rc = 1;
+ }
+ }
+ return rc;
+}
+
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
@@ -1460,8 +1529,11 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
case PDS_PRLI_COMPLETE:
__qla24xx_parse_gpdb(vha, fcport, pd);
break;
- case PDS_PLOGI_PENDING:
case PDS_PLOGI_COMPLETE:
+ if (qla_chk_secure_login(vha, fcport, pd))
+ return;
+ fallthrough;
+ case PDS_PLOGI_PENDING:
case PDS_PRLI_PENDING:
case PDS_PRLI2_PENDING:
/* Set discovery state back to GNL to Relogin attempt */
@@ -2053,26 +2125,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- if (NVME_TARGET(vha->hw, ea->fcport)) {
- ql_dbg(ql_dbg_disc, vha, 0x2117,
- "%s %d %8phC post prli\n",
- __func__, __LINE__, ea->fcport->port_name);
- qla24xx_post_prli_work(vha, ea->fcport);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->loop_id, ea->fcport->d_id.b24);
-
+ if (vha->hw->flags.edif_enabled) {
set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
ea->fcport->send_els_logo = 0;
- ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ } else {
+ if (NVME_TARGET(vha->hw, fcport)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2117,
+ "%s %d %8phC post prli\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
+ __func__, __LINE__, fcport->port_name,
+ fcport->loop_id, fcport->d_id.b24);
+
+ set_bit(fcport->loop_id, vha->hw->loop_id_map);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ fcport->logout_on_delete = 1;
+ fcport->send_els_logo = 0;
+ fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ }
}
break;
case MBS_COMMAND_ERROR:
@@ -3877,7 +3961,8 @@ enable_82xx_npiv:
}
/* Enable PUREX PASSTHRU */
- if (ql2xrdpenable || ha->flags.scm_supported_f)
+ if (ql2xrdpenable || ha->flags.scm_supported_f ||
+ ha->flags.edif_enabled)
qla25xx_set_els_cmds_supported(vha);
} else
goto failed;
@@ -4062,7 +4147,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
}
/* Move PUREX, ABTS RX & RIDA to ATIOQ */
- if (ql2xmvasynctoatio &&
+ if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
(IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
if (qla_tgt_mode_enabled(vha) ||
qla_dual_mode_enabled(vha))
@@ -4090,7 +4175,8 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] &= ~BIT_8;
}
- if (ql2xrdpenable || ha->flags.scm_supported_f)
+ if (ql2xrdpenable || ha->flags.scm_supported_f ||
+ ha->flags.edif_enabled)
ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
/* Enable Async 8130/8131 events -- transceiver insertion/removal */
@@ -5071,6 +5157,16 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
+ spin_lock_init(&fcport->edif.sa_list_lock);
+ INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
+ INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
+
+ if (vha->e_dbell.db_flags == EDB_ACTIVE)
+ fcport->edif.app_started = 1;
+
+ spin_lock_init(&fcport->edif.indx_list_lock);
+ INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
+
return fcport;
}
@@ -5084,8 +5180,13 @@ qla2x00_free_fcport(fc_port_t *fcport)
fcport->ct_desc.ct_sns = NULL;
}
+
+ qla_edif_flush_sa_ctl_lists(fcport);
list_del(&fcport->list);
qla2x00_clear_loop_id(fcport);
+
+ qla_edif_list_del(fcport);
+
kfree(fcport);
}
@@ -5204,6 +5305,12 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
"LOOP READY.\n");
ha->flags.fw_init_done = 1;
+ if (vha->hw->flags.edif_enabled &&
+ vha->e_dbell.db_flags != EDB_ACTIVE) {
+ /* wake up authentication app to get ready */
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 0);
+ }
+
/*
* Process any ATIO queue entries that came in
* while we weren't online.
@@ -5223,7 +5330,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
"%s *** FAILED ***.\n", __func__);
} else {
ql_dbg(ql_dbg_disc, vha, 0x206b,
- "%s: exiting normally.\n", __func__);
+ "%s: exiting normally. local port wwpn %8phN id %06x)\n",
+ __func__, vha->port_name, vha->d_id.b24);
}
/* Restore state if a resync event occurred during processing */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d0ee843f6b04..625d6b237fb2 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -118,7 +118,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
*
* Returns a pointer to the continuation type 1 IOCB packet.
*/
-static inline cont_a64_entry_t *
+cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;
@@ -145,7 +145,6 @@ inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- uint8_t guard = scsi_host_get_guard(cmd->device->host);
/* We always use DIFF Bundling for best performance */
*fw_prot_opts = 0;
@@ -166,7 +165,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- if (guard & SHOST_DIX_GUARD_IP)
+ if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
else
*fw_prot_opts |= PO_MODE_DIF_PASS;
@@ -176,6 +175,9 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
break;
}
+ if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
+ *fw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
return scsi_prot_sg_count(cmd);
}
@@ -772,74 +774,19 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- switch (scsi_get_prot_type(cmd)) {
- case SCSI_PROT_DIF_TYPE0:
- /*
- * No check for ql2xenablehba_err_chk, as it would be an
- * I/O error if hba tag generation is not done.
- */
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
-
- if (!qla2x00_hba_err_chk_enabled(sp))
- break;
-
- pkt->ref_tag_mask[0] = 0xff;
- pkt->ref_tag_mask[1] = 0xff;
- pkt->ref_tag_mask[2] = 0xff;
- pkt->ref_tag_mask[3] = 0xff;
- break;
-
- /*
- * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
- * match LBA in CDB + N
- */
- case SCSI_PROT_DIF_TYPE2:
- pkt->app_tag = cpu_to_le16(0);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
-
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
-
- if (!qla2x00_hba_err_chk_enabled(sp))
- break;
-
- /* enable ALL bytes of the ref tag */
- pkt->ref_tag_mask[0] = 0xff;
- pkt->ref_tag_mask[1] = 0xff;
- pkt->ref_tag_mask[2] = 0xff;
- pkt->ref_tag_mask[3] = 0xff;
- break;
-
- /* For Type 3 protection: 16 bit GUARD only */
- case SCSI_PROT_DIF_TYPE3:
- pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
- pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
- 0x00;
- break;
-
- /*
- * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
- * 16 bit app tag.
- */
- case SCSI_PROT_DIF_TYPE1:
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
- pkt->app_tag = cpu_to_le16(0);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
-
- if (!qla2x00_hba_err_chk_enabled(sp))
- break;
+ pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));
- /* enable ALL bytes of the ref tag */
+ if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
+ qla2x00_hba_err_chk_enabled(sp)) {
pkt->ref_tag_mask[0] = 0xff;
pkt->ref_tag_mask[1] = 0xff;
pkt->ref_tag_mask[2] = 0xff;
pkt->ref_tag_mask[3] = 0xff;
- break;
}
+
+ pkt->app_tag = cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
}
int
@@ -905,7 +852,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
memset(&sgx, 0, sizeof(struct qla2_sgx));
if (sp) {
cmd = GET_CMD_SP(sp);
- prot_int = cmd->device->sector_size;
+ prot_int = scsi_prot_interval(cmd);
sgx.tot_bytes = scsi_bufflen(cmd);
sgx.cur_sg = scsi_sglist(cmd);
@@ -1963,6 +1910,9 @@ qla2xxx_start_scsi_mq(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
+ if (sp->fcport->edif.enable)
+ return qla28xx_start_scsi_edif(sp);
+
/* Acquire qpair specific lock */
spin_lock_irqsave(&qpair->qp_lock, flags);
@@ -2466,6 +2416,12 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
+ logio->control_flags |=
+ cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
+ logio->io_parameter[0] =
+ cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
+ }
}
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2806,7 +2762,6 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(uint8_t *)els_iocb,
sizeof(*els_iocb));
} else {
- els_iocb->control_flags = cpu_to_le16(1 << 13);
els_iocb->tx_byte_count =
cpu_to_le32(sizeof(struct els_logo_payload));
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
@@ -3106,6 +3061,43 @@ done:
return rval;
}
+/* it is assumed the qpair lock is held */
+void qla_els_pt_iocb(struct scsi_qla_host *vha,
+ struct els_entry_24xx *els_iocb,
+ struct qla_els_pt_arg *a)
+{
+ els_iocb->entry_type = ELS_IOCB_TYPE;
+ els_iocb->entry_count = 1;
+ els_iocb->sys_define = 0;
+ els_iocb->entry_status = 0;
+ els_iocb->handle = QLA_SKIP_HANDLE;
+ els_iocb->nport_handle = a->nport_handle;
+ els_iocb->rx_xchg_address = a->rx_xchg_address;
+ els_iocb->tx_dsd_count = cpu_to_le16(1);
+ els_iocb->vp_index = a->vp_idx;
+ els_iocb->sof_type = EST_SOFI3;
+ els_iocb->rx_dsd_count = cpu_to_le16(0);
+ els_iocb->opcode = a->els_opcode;
+
+ els_iocb->d_id[0] = a->did.b.al_pa;
+ els_iocb->d_id[1] = a->did.b.area;
+ els_iocb->d_id[2] = a->did.b.domain;
+ /* For SID the byte order is different from DID */
+ els_iocb->s_id[1] = vha->d_id.b.al_pa;
+ els_iocb->s_id[2] = vha->d_id.b.area;
+ els_iocb->s_id[0] = vha->d_id.b.domain;
+
+ els_iocb->control_flags = cpu_to_le16(a->control_flags);
+
+ els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
+ els_iocb->tx_len = cpu_to_le32(a->tx_len);
+ put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
+
+ els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
+ els_iocb->rx_len = cpu_to_le32(a->rx_len);
+ put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
+}
+
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
@@ -3700,6 +3692,16 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.srr_reject_code = 0;
nack->u.isp24.srr_reject_code_expl = 0;
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
+ (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
+ sp->vha->hw->flags.edif_enabled) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
+ "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
+ sp->name, sp->handle, sp->fcport->loop_id,
+ sp->fcport->d_id.b24);
+ nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
+ }
}
/*
@@ -3804,6 +3806,10 @@ qla2x00_start_sp(srb_t *sp)
case SRB_ELS_CMD_HST:
qla24xx_els_iocb(sp, pkt);
break;
+ case SRB_ELS_CMD_HST_NOLOGIN:
+ qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
+ ((struct els_entry_24xx *)pkt)->handle = sp->handle;
+ break;
case SRB_CT_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_ct_iocb(sp, pkt) :
@@ -3851,6 +3857,12 @@ qla2x00_start_sp(srb_t *sp)
case SRB_PRLO_CMD:
qla24xx_prlo_iocb(sp, pkt);
break;
+ case SRB_SA_UPDATE:
+ qla24xx_sa_update_iocb(sp, pkt);
+ break;
+ case SRB_SA_REPLACE:
+ qla24xx_sa_replace_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d9fb093a60a1..e8928fd83049 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -170,6 +170,149 @@ qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
}
/**
+ * __qla_consume_iocb - tell the fw that the driver has processed or
+ * consumed the head IOCB along with its continuation IOCBs from the
+ * provided response queue.
+ * @vha: host adapter pointer
+ * @pkt: pointer to current packet. On return, this pointer shall move
+ * to the next packet.
+ * @rsp: response queue pointer.
+ *
+ * it is assumed pkt is the head IOCB, not a continuation IOCB
+ */
+void __qla_consume_iocb(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp)
+{
+ struct rsp_que *rsp_q = *rsp;
+ response_t *new_pkt;
+ uint16_t entry_count_remaining;
+ struct purex_entry_24xx *purex = *pkt;
+
+ entry_count_remaining = purex->entry_count;
+ while (entry_count_remaining > 0) {
+ new_pkt = rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+
+ new_pkt->signature = RESPONSE_PROCESSED;
+ /* flush signature */
+ wmb();
+ --entry_count_remaining;
+ }
+}
+
+/**
+ * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
+ * and save to provided buffer
+ * @vha: host adapter pointer
+ * @pkt: pointer to the PUREX IOCB
+ * @rsp: response queue
+ * @buf: buffer to copy the extracted ELS payload into
+ * @buf_len: buffer length
+ */
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
+{
+ struct purex_entry_24xx *purex = *pkt;
+ struct rsp_que *rsp_q = *rsp;
+ sts_cont_entry_t *new_pkt;
+ uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+ uint16_t buffer_copy_offset = 0;
+ uint16_t entry_count_remaining;
+ u16 tpad;
+
+ entry_count_remaining = purex->entry_count;
+ total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
+ - PURX_ELS_HEADER_SIZE;
+
+ /*
+ * the payload may not end on a 4-byte boundary; round up / pad
+ * so there is room to byte-swap before saving the data
+ */
+ tpad = roundup(total_bytes, 4);
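+ /* e.g. total_bytes = 118 rounds up to tpad = 120 (illustrative) */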
+
+ if (buf_len < tpad) {
+ ql_dbg(ql_dbg_async, vha, 0x5084,
+ "%s buffer is too small %d < %d\n",
+ __func__, buf_len, tpad);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return -EIO;
+ }
+
+ pending_bytes = total_bytes = tpad;
+ no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
+ sizeof(purex->els_frame_payload) : pending_bytes;
+
+ memcpy(buf, &purex->els_frame_payload[0], no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+
+ ((response_t *)purex)->signature = RESPONSE_PROCESSED;
+ /* flush signature */
+ wmb();
+
+ do {
+ while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+ new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+ ql_log(ql_log_warn, vha, 0x507a,
+ "Unexpected IOCB type, partial data 0x%x\n",
+ buffer_copy_offset);
+ break;
+ }
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+ no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+ sizeof(new_pkt->data) : pending_bytes;
+ if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+ memcpy((buf + buffer_copy_offset), new_pkt->data,
+ no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+ } else {
+ ql_log(ql_log_warn, vha, 0x5044,
+ "Attempt to copy more that we got, optimizing..%x\n",
+ buffer_copy_offset);
+ memcpy((buf + buffer_copy_offset), new_pkt->data,
+ total_bytes - buffer_copy_offset);
+ }
+
+ ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+ /* flush signature */
+ wmb();
+ }
+
+ if (pending_bytes != 0 || entry_count_remaining != 0) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
+ total_bytes, entry_count_remaining);
+ return -EIO;
+ }
+ } while (entry_count_remaining > 0);
+
+ be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);
+
+ return 0;
+}
+
+/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
* @irq: interrupt number
* @dev_id: SCSI driver HA context
@@ -1727,6 +1870,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
srb_t *sp;
uint16_t index;
+ if (pkt->handle == QLA_SKIP_HANDLE)
+ return NULL;
+
index = LSW(pkt->handle);
if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
@@ -1971,7 +2117,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
static void
-qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
struct sts_entry_24xx *pkt, int iocb_type)
{
struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
@@ -1982,18 +2128,58 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
- int res;
+ int res, logit = 1;
struct srb_iocb *els;
+ uint n;
+ scsi_qla_host_t *vha;
+ struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
- sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
if (!sp)
return;
+ bsg_job = sp->u.bsg_job;
+ vha = sp->vha;
type = NULL;
+
+ comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+ fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+ fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+
switch (sp->type) {
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
+ type = "rpt hst";
+ break;
+ case SRB_ELS_CMD_HST_NOLOGIN:
type = "els";
+ {
+ struct els_entry_24xx *els = (void *)pkt;
+ struct qla_bsg_auth_els_request *p =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ ql_dbg(ql_dbg_user, vha, 0x700f,
+ "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd),
+ e->d_id[2], e->d_id[1], e->d_id[0],
+ comp_status, p->e.extra_rx_xchg_address, bsg_job);
+
+ if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
+ if (sp->remap.remapped) {
+ n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ sp->remap.rsp.buf,
+ sp->remap.rsp.len);
+ ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
+ "%s: SG copied %x of %x\n",
+ __func__, n, sp->remap.rsp.len);
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x700f,
+ "%s: NOT REMAPPED (error)...!!!\n",
+ __func__);
+ }
+ }
+ }
break;
case SRB_CT_CMD:
type = "ct pass-through";
@@ -2023,10 +2209,6 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
}
- comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
- fw_status[1] = le32_to_cpu(ese->error_subcode_1);
- fw_status[2] = le32_to_cpu(ese->error_subcode_2);
-
if (iocb_type == ELS_IOCB_TYPE) {
els = &sp->u.iocb_cmd;
els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
@@ -2040,15 +2222,52 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
res = DID_OK << 16;
els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
ese->total_byte_count));
+
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
+ __func__, e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
+ logit = 0;
+ }
+
+ } else if (comp_status == CS_PORT_LOGGED_OUT) {
+ els->u.els_plogi.len = 0;
+ res = DID_IMM_RETRY << 16;
} else {
els->u.els_plogi.len = 0;
res = DID_ERROR << 16;
}
+
+ if (logit) {
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
+ } else {
+ ql_log(ql_log_info, vha, 0x503f,
+ "%s IOCB Done hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+ ql_log(ql_log_info, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
+ }
+ }
}
- ql_dbg(ql_dbg_disc, vha, 0x503f,
- "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
- type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le32_to_cpu(ese->total_byte_count));
goto els_ct_done;
}
@@ -2153,6 +2372,10 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (sp->type != SRB_LOGIN_CMD)
goto logio_done;
+ lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
+ if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
+ fcport->flags |= FCF_FCSP_DEVICE;
+
iop[0] = le32_to_cpu(logio->io_parameter[0]);
if (iop[0] & BIT_4) {
fcport->port_type = FCT_TARGET;
@@ -2969,9 +3192,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ /* Fast path completion. */
+ qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
sp->qpair->cmd_completion_cnt++;
- /* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);
@@ -3366,6 +3590,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
}
break;
+ case SA_UPDATE_IOCB_TYPE:
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
@@ -3453,6 +3678,63 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
}
/**
+ * qla_chk_cont_iocb_avail - check that all continuation IOCBs are available
+ * before IOCB processing can start.
+ * @vha: host adapter pointer
+ * @rsp: response queue
+ * @pkt: head IOCB describing how many continuation IOCBs follow
+ * Return: 0 if all IOCBs have arrived, non-zero if they have not.
+ */
+static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt)
+{
+ int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
+ response_t *end_pkt;
+ int rc = 0;
+ u32 rsp_q_in;
+
+ if (pkt->entry_count == 1)
+ return rc;
+
+ /* ring_index was pre-incremented; set it back to the current pkt */
+ if (rsp->ring_index == 0)
+ start_pkt_ring_index = rsp->length - 1;
+ else
+ start_pkt_ring_index = rsp->ring_index - 1;
+
+ if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
+ end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
+ rsp->length - 1;
+ else
+ end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
+
+ end_pkt = rsp->ring + end_pkt_ring_index;
+
+ /* next pkt = end_pkt + 1 */
+ n_ring_index = end_pkt_ring_index + 1;
+ if (n_ring_index >= rsp->length)
+ n_ring_index = 0;
+
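+ /*
+ * Worked example (illustrative): with rsp->length = 32,
+ * rsp->ring_index = 5 and pkt->entry_count = 3, the head IOCB sits
+ * at index 4, the last continuation at index 6, and n_ring_index is
+ * 7; the set is complete once the firmware's rsp_q_in reaches 7 or
+ * beyond, or has wrapped back below index 4.
+ */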
+ rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
+ rd_reg_dword(rsp->rsp_q_in);
+
+ /* rsp_q_in has either wrapped or points beyond end_pkt */
+ if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
+ rsp_q_in >= n_ring_index)
+ /* all IOCBs arrived. */
+ rc = 0;
+ else
+ rc = -EIO;
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
+ "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
+ __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
+ rsp_q_in, rc);
+
+ return rc;
+}
+
+/**
* qla24xx_process_response_queue() - Process response queue entries.
* @vha: SCSI driver HA context
* @rsp: response queue
@@ -3592,12 +3874,26 @@ process_err:
qla27xx_process_purex_fpin);
break;
+ case ELS_AUTH_ELS:
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "Defer processing ELS opcode %#x...\n",
+ purex_entry->els_frame_payload[3]);
+ return;
+ }
+ qla24xx_auth_els(vha, (void **)&pkt, &rsp);
+ break;
default:
ql_log(ql_log_warn, vha, 0x509c,
"Discarding ELS Request opcode 0x%x\n",
purex_entry->els_frame_payload[3]);
}
break;
+ case SA_UPDATE_IOCB_TYPE:
+ qla28xx_sa_update_iocb_entry(vha, rsp->req,
+ (struct sa_update_28xx *)pkt);
+ break;
+
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 9f3ad8aa649c..4dd008e06617 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -739,7 +739,7 @@ again:
mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
- mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
+ mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
} else {
mcp->mb[1] = LSW(risc_addr);
mcp->out_mb |= MBX_1;
@@ -795,6 +795,12 @@ again:
}
}
+ if (IS_QLA28XX(ha) && (mcp->mb[5] & BIT_10) && ql2xsecenable) {
+ ha->flags.edif_enabled = 1;
+ ql_log(ql_log_info, vha, 0xffff,
+ "%s: edif is enabled\n", __func__);
+ }
+
done:
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
"Done %s.\n", __func__);
@@ -4946,7 +4952,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
return rval;
}
-#define PUREX_CMD_COUNT 2
+#define PUREX_CMD_COUNT 4
int
qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
{
@@ -4954,6 +4960,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
uint8_t *els_cmd_map;
+ uint8_t active_cnt = 0;
dma_addr_t els_cmd_map_dma;
uint8_t cmd_opcode[PUREX_CMD_COUNT];
uint8_t i, index, purex_bit;
@@ -4975,10 +4982,20 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
}
/* List of Purex ELS */
- cmd_opcode[0] = ELS_FPIN;
- cmd_opcode[1] = ELS_RDP;
+ if (ql2xrdpenable) {
+ cmd_opcode[active_cnt] = ELS_RDP;
+ active_cnt++;
+ }
+ if (ha->flags.scm_supported_f) {
+ cmd_opcode[active_cnt] = ELS_FPIN;
+ active_cnt++;
+ }
+ if (ha->flags.edif_enabled) {
+ cmd_opcode[active_cnt] = ELS_AUTH_ELS;
+ active_cnt++;
+ }
- for (i = 0; i < PUREX_CMD_COUNT; i++) {
+ for (i = 0; i < active_cnt; i++) {
index = cmd_opcode[i] / 8;
purex_bit = cmd_opcode[i] % 8;
els_cmd_map[index] |= 1 << purex_bit;
@@ -6588,6 +6605,12 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.al_pa = pd->port_id[2];
fcport->d_id.b.rsvd_1 = 0;
+ ql_dbg(ql_dbg_disc, vha, 0x2062,
+ "%8phC SVC Param w3 %02x%02x",
+ fcport->port_name,
+ pd->prli_svc_param_word_3[1],
+ pd->prli_svc_param_word_3[0]);
+
if (NVME_TARGET(vha->hw, fcport)) {
fcport->port_type = FCT_NVME;
if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c7caf322f445..078d596dbd49 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -158,6 +158,10 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
int ret = QLA_SUCCESS;
fc_port_t *fcport;
+ if (vha->hw->flags.edif_enabled)
+ /* delete sessions and flush sa_indexes */
+ qla2x00_wait_for_sess_deletion(vha);
+
if (vha->hw->flags.fw_started)
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
@@ -166,7 +170,8 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list)
fcport->logout_on_delete = 0;
- qla2x00_mark_all_devices_lost(vha);
+ if (!vha->hw->flags.edif_enabled)
+ qla2x00_wait_for_sess_deletion(vha);
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 3e5c70a1d969..fdac3f7fa080 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -463,6 +463,10 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
} else if (fd->io_dir == 0) {
cmd_pkt->control_flags = 0;
}
+
+ if (sp->fcport->edif.enable && fd->io_dir != 0)
+ cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+
/* Set BIT_13 of control flags for Async event */
if (vha->flags.nvme2_enabled &&
cmd->sqe.common.opcode == nvme_admin_async_event) {
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 615e44af1ca6..11aad97dfca8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2166,7 +2166,6 @@ qla82xx_poll(int irq, void *dev_id)
struct qla_hw_data *ha;
struct rsp_que *rsp;
struct device_reg_82xx __iomem *reg;
- int status = 0;
uint32_t stat;
uint32_t host_int = 0;
uint16_t mb[8];
@@ -2195,7 +2194,6 @@ qla82xx_poll(int irq, void *dev_id)
case 0x10:
case 0x11:
qla82xx_mbx_completion(vha, MSW(stat));
- status |= MBX_INTERRUPT;
break;
case 0x12:
mb[0] = MSW(stat);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index cedd558f65eb..126ac7e24ea9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -53,6 +53,11 @@ static struct kmem_cache *ctx_cachep;
*/
uint ql_errlev = 0x8001;
+int ql2xsecenable;
+module_param(ql2xsecenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xsecenable,
+ "Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");
+
static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
@@ -849,7 +854,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
uint16_t hwq;
struct qla_qpair *qpair = NULL;
- tag = blk_mq_unique_tag(cmd->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
hwq = blk_mq_unique_tag_to_hwq(tag);
qpair = ha->queue_pair_map[hwq];
@@ -1120,12 +1125,28 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
int res;
+ /* Return 0 = sleep, non-zero = wake */
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_init, vha, 0x00ec,
"tgt %p, fcport_count=%d\n",
vha, vha->fcport_count);
res = (vha->fcport_count == 0);
+ if (res) {
+ struct fc_port *fcport;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->deleted != QLA_SESS_DELETED) {
+ /* session(s) may not be fully logged in
+ * (i.e. fcport_count=0), but session
+ * deletion thread(s) may be in flight.
+ */
+
+ res = 0;
+ break;
+ }
+ }
+ }
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return res;
@@ -1742,7 +1763,7 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- if (ret_cmd && blk_mq_request_started(cmd->request))
+ if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
sp->done(sp, res);
} else {
sp->done(sp, res);
@@ -2835,6 +2856,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ha->tgt.sess_lock);
spin_lock_init(&ha->tgt.atio_lock);
+ spin_lock_init(&ha->sadb_lock);
+ INIT_LIST_HEAD(&ha->sadb_tx_index_list);
+ INIT_LIST_HEAD(&ha->sadb_rx_index_list);
+
+ spin_lock_init(&ha->sadb_fp_lock);
+
+ if (qla_edif_sadb_build_free_pool(ha)) {
+ kfree(ha);
+ goto disable_device;
+ }
+
atomic_set(&ha->nvme_active_aen_cnt, 0);
/* Clear our data area */
@@ -3460,6 +3492,8 @@ skip_dpc:
return 0;
probe_failed:
+ qla_enode_stop(base_vha);
+ qla_edb_stop(base_vha);
if (base_vha->gnl.l) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
@@ -3762,6 +3796,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
base_vha->gnl.l = NULL;
+ qla_enode_stop(base_vha);
+ qla_edb_stop(base_vha);
vfree(base_vha->scan.l);
@@ -3795,7 +3831,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_free_sysfs_attr(base_vha, true);
fc_remove_host(base_vha->host);
- qlt_remove_target_resources(ha);
scsi_remove_host(base_vha->host);
@@ -3867,6 +3902,9 @@ qla2x00_free_device(scsi_qla_host_t *vha)
qla82xx_md_free(vha);
+ qla_edif_sadb_release_free_pool(ha);
+ qla_edif_sadb_release(ha);
+
qla2x00_free_queues(ha);
}
@@ -3919,6 +3957,8 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
qla2x00_schedule_rport_del(vha, fcport);
}
+
+ qla_edif_sess_down(vha, fcport);
/*
* We may need to retry the login, so don't change the state of the
* port but do the retries.
@@ -3972,15 +4012,20 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
struct req_que **req, struct rsp_que **rsp)
{
char name[16];
+ int rc;
ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
&ha->init_cb_dma, GFP_KERNEL);
if (!ha->init_cb)
goto fail;
- if (qlt_mem_alloc(ha) < 0)
+ rc = btree_init32(&ha->host_map);
+ if (rc)
goto fail_free_init_cb;
+ if (qlt_mem_alloc(ha) < 0)
+ goto fail_free_btree;
+
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
@@ -3990,7 +4035,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->srb_mempool)
goto fail_free_gid_list;
- if (IS_P3P_TYPE(ha)) {
+ if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) {
/* Allocate cache for CT6 Ctx. */
if (!ctx_cachep) {
ctx_cachep = kmem_cache_create("qla2xxx_ctx",
@@ -4024,7 +4069,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
- if (IS_P3P_TYPE(ha) || ql2xenabledif) {
+ if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) {
ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
DSD_LIST_DMA_POOL_SIZE, 8, 0);
if (!ha->dl_dma_pool) {
@@ -4264,8 +4309,36 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
goto fail_flt_buffer;
}
+ /* allocate the purex dma pool */
+ ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ MAX_PAYLOAD, 8, 0);
+
+ if (!ha->purex_dma_pool) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+ "Unable to allocate purex_dma_pool.\n");
+ goto fail_flt;
+ }
+
+ ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
+ ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
+ ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
+
+ if (!ha->elsrej.c) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
+ "Alloc failed for els reject cmd.\n");
+ goto fail_elsrej;
+ }
+ ha->elsrej.c->er_cmd = ELS_LS_RJT;
+ ha->elsrej.c->er_reason = ELS_RJT_BUSY;
+ ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
return 0;
+fail_elsrej:
+ dma_pool_destroy(ha->purex_dma_pool);
+fail_flt:
+ dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ ha->flt, ha->flt_dma);
+
fail_flt_buffer:
dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
ha->sfp_data, ha->sfp_data_dma);
@@ -4356,6 +4429,8 @@ fail_free_gid_list:
ha->gid_list_dma = 0;
fail_free_tgt_mem:
qlt_mem_free(ha);
+fail_free_btree:
+ btree_destroy32(&ha->host_map);
fail_free_init_cb:
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
ha->init_cb_dma);
@@ -4772,10 +4847,21 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->dif_bundl_pool = NULL;
qlt_mem_free(ha);
+ qla_remove_hostmap(ha);
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
+
+ dma_pool_destroy(ha->purex_dma_pool);
+ ha->purex_dma_pool = NULL;
+
+ if (ha->elsrej.c) {
+ dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
+ ha->elsrej.c, ha->elsrej.cdma);
+ ha->elsrej.c = NULL;
+ }
+
ha->init_cb = NULL;
ha->init_cb_dma = 0;
@@ -4837,6 +4923,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
init_waitqueue_head(&vha->vref_waitq);
+ qla_enode_init(vha);
+ qla_edb_init(vha);
+
vha->gnl.size = sizeof(struct get_name_list_extended) *
(ha->max_loop_id + 1);
@@ -5327,6 +5416,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
e->u.fcport.fcport, false);
break;
+ case QLA_EVT_SA_REPLACE:
+ qla24xx_issue_sa_replace_iocb(vha, e);
+ break;
}
if (rc == EAGAIN) {
@@ -5376,6 +5468,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (atomic_read(&fcport->state) != FCS_ONLINE &&
fcport->login_retry) {
if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
fcport->disc_state == DSC_LOGIN_COMPLETE)
continue;
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 060c89237777..a0aeba69513d 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2936,7 +2936,6 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
liter += dburst - 1;
faddr += dburst - 1;
dwptr += dburst - 1;
- continue;
}
write_protect:
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index eb47140a899f..c3a589659658 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -184,8 +184,7 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
return QLA_SUCCESS;
}
-static inline
-struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
be_id_t d_id)
{
struct scsi_qla_host *host;
@@ -198,7 +197,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
key = be_to_port_id(d_id).b24;
- host = btree_lookup32(&vha->hw->tgt.host_map, key);
+ host = btree_lookup32(&vha->hw->host_map, key);
if (!host)
ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
"Unable to find host %06x\n", key);
@@ -299,7 +298,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
goto abort;
}
- host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
+ host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
"Requeuing unknown ATIO_TYPE7 %p\n", u);
@@ -348,7 +347,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
{
- struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+ struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
atio->u.isp24.fcp_hdr.d_id);
if (unlikely(NULL == host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe03e,
@@ -577,6 +576,18 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
sp->fcport->logout_on_delete = 1;
sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
sp->fcport->send_els_logo = 0;
+
+ if (sp->fcport->flags & FCF_FCSP_DEVICE) {
+ ql_dbg(ql_dbg_edif, vha, 0x20ef,
+ "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
+ sp->fcport->port_name);
+ qla2x00_set_fcport_disc_state(sp->fcport,
+ DSC_LOGIN_AUTH_PEND);
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ sp->fcport->d_id.b24);
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
+ 0, sp->fcport);
+ }
break;
case SRB_NACK_PRLI:
@@ -624,6 +635,10 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
case SRB_NACK_PLOGI:
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
c = "PLOGI";
+ if (vha->hw->flags.edif_enabled &&
+ (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+ fcport->flags |= FCF_FCSP_DEVICE;
+ }
break;
case SRB_NACK_PRLI:
fcport->fw_login_state = DSC_LS_PRLI_PEND;
@@ -693,7 +708,12 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
void qla24xx_delete_sess_fn(struct work_struct *work)
{
fc_port_t *fcport = container_of(work, struct fc_port, del_work);
- struct qla_hw_data *ha = fcport->vha->hw;
+ struct qla_hw_data *ha = NULL;
+
+ if (!fcport || !fcport->vha || !fcport->vha->hw)
+ return;
+
+ ha = fcport->vha->hw;
if (fcport->se_sess) {
ha->tgt.tgt_ops->shutdown_sess(fcport);
@@ -965,6 +985,19 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
+ if (ha->flags.edif_enabled &&
+ (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
+ if (!ha->flags.host_shutting_down) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
+ __func__, sess->port_name);
+ qla2x00_release_all_sadb(vha, sess);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s bypassing release_all_sadb\n",
+ __func__);
+ }
+ }
qla2x00_mark_device_lost(vha, sess, 0);
if (sess->send_els_logo) {
@@ -972,6 +1005,7 @@ void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ INIT_LIST_HEAD(&logo.list);
if (!own)
qlt_send_first_logo(vha, &logo);
sess->send_els_logo = 0;
@@ -982,6 +1016,7 @@ void qlt_free_session_done(struct work_struct *work)
if (!own ||
(own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
+ sess->logout_completed = 0;
rc = qla2x00_post_async_logout_work(vha, sess,
NULL);
if (rc != QLA_SUCCESS)
@@ -1278,8 +1313,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_log_warn, sess->vha, 0xe001,
- "Scheduling sess %p for deletion %8phC\n",
- sess, sess->port_name);
+ "Scheduling sess %p for deletion %8phC fc4_type %x\n",
+ sess, sess->port_name, sess->fc4_type);
WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
@@ -1720,6 +1755,12 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
nack->u.isp24.srr_reject_code_expl = srr_explan;
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+ /* TODO qualify this with EDIF enable */
+ if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
+ (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+ nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
+ }
+
ql_dbg(ql_dbg_tgt, vha, 0xe005,
"qla_target(%d): Sending 24xx Notify Ack %d\n",
vha->vp_idx, nack->u.isp24.status);
@@ -2571,6 +2612,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
struct ctio7_to_24xx *pkt;
struct atio_from_isp *atio = &prm->cmd->atio;
uint16_t temp;
+ struct qla_tgt_cmd *cmd = prm->cmd;
pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
prm->pkt = pkt;
@@ -2603,6 +2645,15 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
pkt->u.status0.ox_id = cpu_to_le16(temp);
pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+ if (cmd->edif) {
+ if (cmd->dma_data_direction == DMA_TO_DEVICE)
+ prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
+ if (cmd->dma_data_direction == DMA_FROM_DEVICE)
+ prm->cmd->sess->edif.tx_bytes += cmd->bufflen;
+
+ pkt->u.status0.edif_flags |= EF_EN_EDIF;
+ }
+
return 0;
}
@@ -3293,8 +3344,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
if (xmit_type & QLA_TGT_XMIT_STATUS) {
pkt->u.status0.scsi_status =
cpu_to_le16(prm.rq_result);
- pkt->u.status0.residual =
- cpu_to_le32(prm.residual);
+ if (!cmd->edif)
+ pkt->u.status0.residual =
+ cpu_to_le32(prm.residual);
+
pkt->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_SEND_STATUS);
if (qlt_need_explicit_conf(cmd, 0)) {
@@ -3941,6 +3994,12 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
if (cmd == NULL)
return;
+ if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
+ cmd->sess) {
+ qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
+ (struct ctio7_from_24xx *)ctio);
+ }
+
se_cmd = &cmd->se_cmd;
cmd->cmd_sent_to_fw = 0;
@@ -4011,6 +4070,16 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
qlt_handle_dif_error(qpair, cmd, ctio);
return;
}
+
+ case CTIO_FAST_AUTH_ERR:
+ case CTIO_FAST_INCOMP_PAD_LEN:
+ case CTIO_FAST_INVALID_REQ:
+ case CTIO_FAST_SPI_ERR:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+ "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ break;
+
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
"qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
@@ -4312,6 +4381,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
qlt_assign_qpair(vha, cmd);
cmd->reset_count = vha->hw->base_qpair->chip_reset;
cmd->vp_idx = vha->vp_idx;
+ cmd->edif = sess->edif.enable;
return cmd;
}
@@ -4727,6 +4797,17 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
goto out;
}
+ if (vha->hw->flags.edif_enabled &&
+ !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
+ iocb->u.isp24.status_subcode == ELS_PLOGI &&
+ !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
if (!pla) {
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
@@ -4792,6 +4873,16 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
sess->d_id = port_id;
sess->login_gen++;
+ sess->loop_id = loop_id;
+
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC - send port online\n",
+ __func__, sess->port_name);
+
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ sess->d_id.b24);
+ }
if (iocb->u.isp24.status_subcode == ELS_PRLI) {
sess->fw_login_state = DSC_LS_PRLI_PEND;
@@ -6444,15 +6535,15 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
return 0;
}
-void qlt_remove_target_resources(struct qla_hw_data *ha)
+void qla_remove_hostmap(struct qla_hw_data *ha)
{
struct scsi_qla_host *node;
u32 key = 0;
- btree_for_each_safe32(&ha->tgt.host_map, key, node)
- btree_remove32(&ha->tgt.host_map, key);
+ btree_for_each_safe32(&ha->host_map, key, node)
+ btree_remove32(&ha->host_map, key);
- btree_destroy32(&ha->tgt.host_map);
+ btree_destroy32(&ha->host_map);
}
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
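
The host_map rework uses the generic lib/btree 32-bit-key API end to end: btree_init32() in qla2x00_mem_alloc(), insert/lookup in qlt_update_vp_map(), teardown in qla_remove_hostmap(). A minimal sketch of that API with the 24-bit port ID packed into the u32 key; the example_ name is illustrative:

#include <linux/btree.h>
#include <linux/gfp.h>

static int example_host_map(struct scsi_qla_host *vha)
{
	struct btree_head32 head;
	struct scsi_qla_host *found;
	u32 key = vha->d_id.b24;	/* 24-bit D_ID as the 32-bit key */
	int rc;

	rc = btree_init32(&head);
	if (rc)
		return rc;

	rc = btree_insert32(&head, key, vha, GFP_ATOMIC);
	if (!rc) {
		found = btree_lookup32(&head, key);	/* vha, or NULL */
		WARN_ON(found != vha);
		btree_remove32(&head, key);
	}

	btree_destroy32(&head);
	return rc;
}
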
@@ -7080,8 +7171,7 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
- int rc;
-
+ mutex_init(&base_vha->vha_tgt.tgt_mutex);
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -7094,7 +7184,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
}
- mutex_init(&base_vha->vha_tgt.tgt_mutex);
mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
INIT_LIST_HEAD(&base_vha->unknown_atio_list);
@@ -7103,11 +7192,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
qlt_clear_mode(base_vha);
- rc = btree_init32(&ha->tgt.host_map);
- if (rc)
- ql_log(ql_log_info, base_vha, 0xd03d,
- "Unable to initialize ha->host_map btree\n");
-
qlt_update_vp_map(base_vha, SET_VP_IDX);
}
@@ -7228,21 +7312,20 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
u32 key;
int rc;
- if (!QLA_TGT_MODE_ENABLED())
- return;
-
key = vha->d_id.b24;
switch (cmd) {
case SET_VP_IDX:
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
break;
case SET_AL_PA:
- slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ slot = btree_lookup32(&vha->hw->host_map, key);
if (!slot) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
"Save vha in host_map %p %06x\n", vha, key);
- rc = btree_insert32(&vha->hw->tgt.host_map,
+ rc = btree_insert32(&vha->hw->host_map,
key, vha, GFP_ATOMIC);
if (rc)
ql_log(ql_log_info, vha, 0xd03e,
@@ -7252,17 +7335,19 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
}
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
"replace existing vha in host_map %p %06x\n", vha, key);
- btree_update32(&vha->hw->tgt.host_map, key, vha);
+ btree_update32(&vha->hw->host_map, key, vha);
break;
case RESET_VP_IDX:
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
break;
case RESET_AL_PA:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
"clear vha in host_map %p %06x\n", vha, key);
- slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ slot = btree_lookup32(&vha->hw->host_map, key);
if (slot)
- btree_remove32(&vha->hw->tgt.host_map, key);
+ btree_remove32(&vha->hw->host_map, key);
vha->d_id.b24 = 0;
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 01620f3eab39..156b950ca7e7 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -176,6 +176,7 @@ struct nack_to_isp {
uint8_t reserved[2];
__le16 ox_id;
} __packed;
+#define NOTIFY_ACK_FLAGS_FCSP BIT_5
#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
@@ -238,6 +239,10 @@ struct ctio_to_2xxx {
#define CTIO_PORT_LOGGED_OUT 0x29
#define CTIO_PORT_CONF_CHANGED 0x2A
#define CTIO_SRR_RECEIVED 0x45
+#define CTIO_FAST_AUTH_ERR 0x63
+#define CTIO_FAST_INCOMP_PAD_LEN 0x65
+#define CTIO_FAST_INVALID_REQ 0x66
+#define CTIO_FAST_SPI_ERR 0x67
#endif
#ifndef CTIO_RET_TYPE
@@ -408,7 +413,16 @@ struct ctio7_to_24xx {
struct {
__le16 reserved1;
__le16 flags;
- __le32 residual;
+ union {
+ __le32 residual;
+ struct {
+ uint8_t rsvd1;
+ uint8_t edif_flags;
+#define EF_EN_EDIF BIT_0
+#define EF_NEW_SA BIT_1
+ uint16_t rsvd2;
+ };
+ };
__le16 ox_id;
__le16 scsi_status;
__le32 relative_offset;
@@ -446,7 +460,7 @@ struct ctio7_from_24xx {
uint8_t vp_index;
uint8_t reserved1[5];
__le32 exchange_address;
- __le16 reserved2;
+ __le16 edif_sa_index;
__le16 flags;
__le32 residual;
__le16 ox_id;
@@ -875,6 +889,7 @@ struct qla_tgt_cmd {
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
+ unsigned int edif:1;
/*
* This variable may be set from outside the LIO and I/O completion
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index da11829fa12d..2e05dd74b5cb 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.00.106-k"
+#define QLA2XXX_VERSION "10.02.00.107-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 0
-#define QLA_DRIVER_BETA_VER 106
+#define QLA_DRIVER_BETA_VER 107
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index f786ac2f5548..301bc09c8365 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -119,8 +119,8 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
* the interrupt_handler to think there are responses to be
* processed when there aren't.
*/
- ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
- ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
+ ha->shadow_regs->req_q_out = cpu_to_le32(0);
+ ha->shadow_regs->rsp_q_in = cpu_to_le32(0);
wmb();
writel(0, &ha->reg->req_q_in);
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index cbd1e6ffcd67..28eab07935ba 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -160,7 +160,7 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
/* No data being transferred */
- cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
+ cmd_entry->ttlByteCnt = cpu_to_le32(0);
return;
}
@@ -288,7 +288,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
/* Acquire hardware specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- index = (uint32_t)cmd->request->tag;
+ index = scsi_cmd_to_rq(cmd)->tag;
/*
* Check to see if adapter is online before placing request on
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 187d78aa4f67..cd71074f3abe 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -645,8 +645,8 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
/* Fill in the request and response queue information. */
init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
- init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
- init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+ init_fw_cb->rqq_len = cpu_to_le16(REQUEST_QUEUE_DEPTH);
+ init_fw_cb->compq_len = cpu_to_le16(RESPONSE_QUEUE_DEPTH);
init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
@@ -656,20 +656,20 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
/* Set up required options. */
init_fw_cb->fw_options |=
- __constant_cpu_to_le16(FWOPT_SESSION_MODE |
- FWOPT_INITIATOR_MODE);
+ cpu_to_le16(FWOPT_SESSION_MODE |
+ FWOPT_INITIATOR_MODE);
if (is_qla80XX(ha))
init_fw_cb->fw_options |=
- __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
+ cpu_to_le16(FWOPT_ENABLE_CRBDB);
- init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+ init_fw_cb->fw_options &= cpu_to_le16(~FWOPT_TARGET_MODE);
init_fw_cb->add_fw_options = 0;
init_fw_cb->add_fw_options |=
- __constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
+ cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
init_fw_cb->add_fw_options |=
- __constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
+ cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
!= QLA_SUCCESS) {
@@ -1613,7 +1613,7 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
- chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+ chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
exit_get_chap:
dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
@@ -1655,7 +1655,7 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
chap_table->secret_len = strlen(password);
strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
- chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+ chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
if (is_qla40XX(ha)) {
chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
@@ -1721,7 +1721,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
mutex_lock(&ha->chap_sem);
chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
- if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) {
rval = QLA_ERROR;
goto exit_unlock_uni_chap;
}
@@ -1784,7 +1784,7 @@ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ cpu_to_le16(CHAP_VALID_COOKIE)) {
if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
free_index = i;
continue;
@@ -2105,18 +2105,18 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
if (conn->max_recv_dlength)
fw_ddb_entry->iscsi_max_rcv_data_seg_len =
- __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
+ cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
if (sess->max_r2t)
fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
if (sess->first_burst)
fw_ddb_entry->iscsi_first_burst_len =
- __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));
+ cpu_to_le16((sess->first_burst / BYTE_UNITS));
if (sess->max_burst)
fw_ddb_entry->iscsi_max_burst_len =
- __constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));
+ cpu_to_le16((sess->max_burst / BYTE_UNITS));
if (sess->time2wait)
fw_ddb_entry->iscsi_def_time2wait =
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 66a487795c53..47adff9f0506 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3658,7 +3658,7 @@ qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
"Do ROM fast read failed\n");
goto done_read;
}
- dwptr[i] = __constant_cpu_to_le32(val);
+ dwptr[i] = cpu_to_le32(val);
}
done_read:
@@ -3721,9 +3721,9 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
goto no_flash_data;
}
- if (*wptr == __constant_cpu_to_le16(0xffff))
+ if (*wptr == cpu_to_le16(0xffff))
goto no_flash_data;
- if (flt->version != __constant_cpu_to_le16(1)) {
+ if (flt->version != cpu_to_le16(1)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: "
"version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
@@ -3826,7 +3826,7 @@ qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
- if (*wptr == __constant_cpu_to_le16(0xffff))
+ if (*wptr == cpu_to_le16(0xffff))
goto no_flash_data;
if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -3883,7 +3883,7 @@ qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
QLA82XX_IDC_PARAM_ADDR , 8);
- if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+ if (*wptr == cpu_to_le32(0xffffffff)) {
ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
} else {
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 6ee7ea4c27e0..f1ea65c6e5f5 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -702,7 +702,7 @@ static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
if ((*chap_entry)->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ cpu_to_le16(CHAP_VALID_COOKIE)) {
*chap_entry = NULL;
} else {
rval = QLA_SUCCESS;
@@ -745,7 +745,7 @@ static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if ((chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+ cpu_to_le16(CHAP_VALID_COOKIE)) &&
(i > MAX_RESRV_CHAP_IDX)) {
free_index = i;
break;
@@ -794,7 +794,7 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
for (i = chap_tbl_idx; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE))
+ cpu_to_le16(CHAP_VALID_COOKIE))
continue;
chap_rec->chap_tbl_idx = i;
@@ -923,7 +923,7 @@ static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
goto exit_delete_chap;
}
- chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
+ chap_table->cookie = cpu_to_le16(0xFFFF);
offset = FLASH_CHAP_OFFSET |
(chap_tbl_idx * sizeof(struct ql4_chap_table));
@@ -6043,7 +6043,7 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ cpu_to_le16(CHAP_VALID_COOKIE)) {
continue;
}
@@ -9282,7 +9282,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
"dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
- cmd, jiffies, cmd->request->timeout / HZ,
+ cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
rval = qla4xxx_isp_check_reg(ha);
@@ -9349,7 +9349,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
"to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
- ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
+ ha->host_no, cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
rval = qla4xxx_isp_check_reg(ha);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index d84e218d32cb..8e7e833a36cc 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -890,7 +890,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
cmd->control_flags |= CFLAG_WRITE;
else
cmd->control_flags |= CFLAG_READ;
- cmd->time_out = Cmnd->request->timeout/HZ;
+ cmd->time_out = scsi_cmd_to_rq(Cmnd)->timeout / HZ;
memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d26025cf5de3..b241f9e3885c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -190,7 +190,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
"(result %x)\n", cmd->result));
good_bytes = scsi_bufflen(cmd);
- if (!blk_rq_is_passthrough(cmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
int old_good_bytes = good_bytes;
drv = scsi_cmd_to_driver(cmd);
if (drv->done)
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
new file mode 100644
index 000000000000..81c3853a2a80
--- /dev/null
+++ b/drivers/scsi/scsi_bsg.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bsg.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/sg.h>
+#include "scsi_priv.h"
+
+#define uptr64(val) ((void __user *)(uintptr_t)(val))
+
+static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+ fmode_t mode, unsigned int timeout)
+{
+ struct scsi_request *sreq;
+ struct request *rq;
+ struct bio *bio;
+ int ret;
+
+ if (hdr->protocol != BSG_PROTOCOL_SCSI ||
+ hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
+ return -EINVAL;
+ if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+ pr_warn_once("BIDI support in bsg has been removed.\n");
+ return -EOPNOTSUPP;
+ }
+
+ rq = blk_get_request(q, hdr->dout_xfer_len ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ rq->timeout = timeout;
+
+ ret = -ENOMEM;
+ sreq = scsi_req(rq);
+ sreq->cmd_len = hdr->request_len;
+ if (sreq->cmd_len > BLK_MAX_CDB) {
+ sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
+ if (!sreq->cmd)
+ goto out_put_request;
+ }
+
+ ret = -EFAULT;
+ if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
+ goto out_free_cmd;
+ ret = -EPERM;
+ if (!scsi_cmd_allowed(sreq->cmd, mode))
+ goto out_free_cmd;
+
+ ret = 0;
+ if (hdr->dout_xfer_len) {
+ ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+ hdr->dout_xfer_len, GFP_KERNEL);
+ } else if (hdr->din_xfer_len) {
+ ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+ hdr->din_xfer_len, GFP_KERNEL);
+ }
+
+ if (ret)
+ goto out_free_cmd;
+
+ bio = rq->bio;
+ blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
+
+ /*
+ * fill in all the output members
+ */
+ hdr->device_status = sreq->result & 0xff;
+ hdr->transport_status = host_byte(sreq->result);
+ hdr->driver_status = 0;
+ if (scsi_status_is_check_condition(sreq->result))
+ hdr->driver_status = DRIVER_SENSE;
+ hdr->info = 0;
+ if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->response_len = 0;
+
+ if (sreq->sense_len && hdr->response) {
+ int len = min_t(unsigned int, hdr->max_response_len,
+ sreq->sense_len);
+
+ if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
+ ret = -EFAULT;
+ else
+ hdr->response_len = len;
+ }
+
+ if (rq_data_dir(rq) == READ)
+ hdr->din_resid = sreq->resid_len;
+ else
+ hdr->dout_resid = sreq->resid_len;
+
+ blk_rq_unmap_user(bio);
+
+out_free_cmd:
+ scsi_req_free_cmd(scsi_req(rq));
+out_put_request:
+ blk_put_request(rq);
+ return ret;
+}
+
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev)
+{
+ return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
+ dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn);
+}
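
From user space the new handler is reached through an SG_IO ioctl on a /dev/bsg node carrying an sg_io_v4 header. A hedged, illustrative example (device path, buffer sizes, and error handling are arbitrary or trimmed):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>
#include <linux/bsg.h>

int bsg_inquiry(const char *node)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char buf[96], sense[32];
	struct sg_io_v4 io = {
		.guard		= 'Q',
		.protocol	= BSG_PROTOCOL_SCSI,
		.subprotocol	= BSG_SUB_PROTOCOL_SCSI_CMD,
		.request_len	= sizeof(cdb),
		.request	= (uintptr_t)cdb,
		.din_xfer_len	= sizeof(buf),
		.din_xferp	= (uintptr_t)buf,
		.max_response_len = sizeof(sense),
		.response	= (uintptr_t)sense,
		.timeout	= 60000,			/* ms */
	};
	int fd = open(node, O_RDWR), ret;	/* e.g. /dev/bsg/0:0:0:0 */

	if (fd < 0)
		return -1;
	ret = ioctl(fd, SG_IO, &io);	/* dispatched to scsi_bsg_sg_io_fn */
	close(fd);
	return ret;
}
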
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 90349498f686..6e50e81a8216 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -7,9 +7,18 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
+MODULE_LICENSE("GPL v2");
+
+/* Command group 3 is reserved and should never be used. */
+const unsigned char scsi_command_size_tbl[8] = {
+ 6, 10, 10, 12, 16, 12, 10, 10
+};
+EXPORT_SYMBOL(scsi_command_size_tbl);
+
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
* encouraged once assigned by ANSI/INCITS T10).
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5b3a20a140f9..31529d8add0d 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3076,6 +3076,7 @@ static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
+ int ret = 0;
unsigned int i;
sector_t sector;
struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
@@ -3083,26 +3084,33 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
struct t10_pi_tuple *sdt;
for (i = 0; i < sectors; i++, ei_lba++) {
- int ret;
-
sector = start_sec + i;
sdt = dif_store(sip, sector);
if (sdt->app_tag == cpu_to_be16(0xffff))
continue;
- ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
- ei_lba);
- if (ret) {
- dif_errors++;
- return ret;
+ /*
+ * Because scsi_debug acts as both initiator and
+ * target we proceed to verify the PI even if
+ * RDPROTECT=3. This is done so the "initiator" knows
+ * which type of error to return. Otherwise we would
+ * have to iterate over the PI twice.
+ */
+ if (scp->cmnd[1] >> 5) { /* RDPROTECT */
+ ret = dif_verify(sdt, lba2fake_store(sip, sector),
+ sector, ei_lba);
+ if (ret) {
+ dif_errors++;
+ break;
+ }
}
}
dif_copy_prot(scp, start_sec, sectors, true);
dix_reads++;
- return 0;
+ return ret;
}
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
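
The dif_verify() gated above is the per-sector protection-information check. A hedged sketch of its shape (guard tag as a T10-DIF CRC of the data, reference tag as the low 32 bits of the LBA), returning the codes the RDPROTECT/WRPROTECT switches consume (1 = guard, 3 = ref); details are illustrative:

#include <linux/crc-t10dif.h>
#include <linux/t10-pi.h>

static int example_dif_verify(struct t10_pi_tuple *sdt, const void *data,
			      unsigned int sector_size, sector_t sector)
{
	__be16 csum = cpu_to_be16(crc_t10dif(data, sector_size));

	if (sdt->guard_tag != csum)
		return 0x01;	/* guard tag error */
	if (sdt->ref_tag != cpu_to_be32(sector & 0xffffffff))
		return 0x03;	/* reference tag error */
	return 0;
}
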
@@ -3196,12 +3204,29 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
- int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
-
- if (prot_ret) {
- read_unlock(macc_lckp);
- mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
- return illegal_condition_result;
+ switch (prot_verify_read(scp, lba, num, ei_lba)) {
+ case 1: /* Guard tag error */
+ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
+ read_unlock(macc_lckp);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ return check_condition_result;
+ } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
+ read_unlock(macc_lckp);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ return illegal_condition_result;
+ }
+ break;
+ case 3: /* Reference tag error */
+ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
+ read_unlock(macc_lckp);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
+ return check_condition_result;
+ } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
+ read_unlock(macc_lckp);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
+ return illegal_condition_result;
+ }
+ break;
}
}
@@ -3232,28 +3257,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
-static void dump_sector(unsigned char *buf, int len)
-{
- int i, j, n;
-
- pr_err(">>> Sector Dump <<<\n");
- for (i = 0 ; i < len ; i += 16) {
- char b[128];
-
- for (j = 0, n = 0; j < 16; j++) {
- unsigned char c = buf[i+j];
-
- if (c >= 0x20 && c < 0x7e)
- n += scnprintf(b + n, sizeof(b) - n,
- " %c ", buf[i+j]);
- else
- n += scnprintf(b + n, sizeof(b) - n,
- "%02x ", buf[i+j]);
- }
- pr_err("%04d: %s\n", i, b);
- }
-}
-
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
@@ -3299,10 +3302,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
sdt = piter.addr + ppage_offset;
daddr = diter.addr + dpage_offset;
- ret = dif_verify(sdt, daddr, sector, ei_lba);
- if (ret) {
- dump_sector(daddr, sdebug_sector_size);
- goto out;
+ if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
+ ret = dif_verify(sdt, daddr, sector, ei_lba);
+ if (ret)
+ goto out;
}
sector++;
@@ -3480,12 +3483,29 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
- int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
-
- if (prot_ret) {
- write_unlock(macc_lckp);
- mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
- return illegal_condition_result;
+ switch (prot_verify_write(scp, lba, num, ei_lba)) {
+ case 1: /* Guard tag error */
+ if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
+ write_unlock(macc_lckp);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ return illegal_condition_result;
+ } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
+ write_unlock(macc_lckp);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ return check_condition_result;
+ }
+ break;
+ case 3: /* Reference tag error */
+ if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
+ write_unlock(macc_lckp);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
+ return illegal_condition_result;
+ } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
+ write_unlock(macc_lckp);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
+ return check_condition_result;
+ }
+ break;
}
}
@@ -4702,7 +4722,7 @@ fini:
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
u16 hwq;
- u32 tag = blk_mq_unique_tag(cmnd->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
hwq = blk_mq_unique_tag_to_hwq(tag);
@@ -4715,7 +4735,7 @@ static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
static u32 get_tag(struct scsi_cmnd *cmnd)
{
- return blk_mq_unique_tag(cmnd->request);
+ return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
}
/* Queued (deferred) command completions converge here. */
@@ -5364,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
{
bool new_sd_dp;
bool inject = false;
- bool hipri = (cmnd->request->cmd_flags & REQ_HIPRI);
+ bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
int k, num_in_q, qdepth;
unsigned long iflags;
u64 ns_from_boot = 0;
@@ -5567,8 +5587,9 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
if (unlikely(sd_dp->aborted)) {
- sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
- blk_abort_request(cmnd->request);
+ sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
+ scsi_cmd_to_rq(cmnd)->tag);
+ blk_abort_request(scsi_cmd_to_rq(cmnd));
atomic_set(&sdeb_inject_pending, 0);
sd_dp->aborted = false;
}
@@ -7394,7 +7415,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
(u32)cmd[k]);
}
sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
- blk_mq_unique_tag(scp->request), b);
+ blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
}
if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
return SCSI_MLQUEUE_HOST_BUSY;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index d33355ab6e14..c7080454aea9 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -171,6 +171,7 @@ static struct {
{"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1},
{"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */
+ {"Generic", "Ultra HS-SD/MMC", "2.09", BLIST_IGN_MEDIA_CHANGE | BLIST_INQUIRY_36},
{"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 58a252c38992..b6c86cce57bf 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -242,7 +242,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
*/
static void scsi_eh_reset(struct scsi_cmnd *scmd)
{
- if (!blk_rq_is_passthrough(scmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
if (sdrv->eh_reset)
sdrv->eh_reset(scmd);
@@ -1182,7 +1182,7 @@ static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd)
static enum scsi_disposition
scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn)
{
- if (!blk_rq_is_passthrough(scmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
if (sdrv->eh_action)
rtn = sdrv->eh_action(scmd, rtn);
@@ -1750,21 +1750,23 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
*/
int scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
+ struct request *req = scsi_cmd_to_rq(scmd);
+
switch (host_byte(scmd->result)) {
case DID_OK:
break;
case DID_TIME_OUT:
goto check_type;
case DID_BUS_BUSY:
- return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
+ return req->cmd_flags & REQ_FAILFAST_TRANSPORT;
case DID_PARITY:
- return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
+ return req->cmd_flags & REQ_FAILFAST_DEV;
case DID_ERROR:
if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
return 0;
fallthrough;
case DID_SOFT_ERROR:
- return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
+ return req->cmd_flags & REQ_FAILFAST_DRIVER;
}
if (!scsi_status_is_check_condition(scmd->result))
@@ -1775,8 +1777,7 @@ check_type:
* assume caller has checked sense and determined
* the check condition was retryable.
*/
- if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
- blk_rq_is_passthrough(scmd->request))
+ if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req))
return 1;
return 0;
@@ -2376,7 +2377,6 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
scmd = (struct scsi_cmnd *)(rq + 1);
scsi_init_command(dev, scmd);
- scmd->request = rq;
scmd->cmnd = scsi_req(rq)->cmd;
scmd->scsi_done = scsi_reset_provider_done_command;
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 0d13610cd6bf..7b2b0a1581f4 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/cdrom.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -63,29 +64,6 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
return 1;
}
-/*
-
- * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
- * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used.
- *
- * dev is the SCSI device struct ptr, *(int *) arg is the length of the
- * input data, if any, not including the command string & counts,
- * *((int *)arg + 1) is the output buffer size in bytes.
- *
- * *(char *) ((int *) arg)[2] the actual command byte.
- *
- * Note that if more than MAX_BUF bytes are requested to be transferred,
- * the ioctl will fail with error EINVAL.
- *
- * This size *does not* include the initial lengths that were passed.
- *
- * The SCSI command is read from the memory location immediately after the
- * length words, and the input data is right after the command. The SCSI
- * routines know the command size based on the opcode decode.
- *
- * The output area is then filled in starting from the command byte.
- */
-
static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
int timeout, int retries)
{
@@ -189,10 +167,732 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
? -EFAULT: 0;
}
+static int sg_get_version(int __user *p)
+{
+ static const int sg_version_num = 30527;
+ return put_user(sg_version_num, p);
+}
-static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg)
+static int sg_set_timeout(struct scsi_device *sdev, int __user *p)
{
- char scsi_cmd[MAX_COMMAND_SIZE];
+ int timeout, err = get_user(timeout, p);
+
+ if (!err)
+ sdev->sg_timeout = clock_t_to_jiffies(timeout);
+
+ return err;
+}
+
+static int sg_get_reserved_size(struct scsi_device *sdev, int __user *p)
+{
+ int val = min(sdev->sg_reserved_size,
+ queue_max_bytes(sdev->request_queue));
+
+ return put_user(val, p);
+}
+
+static int sg_set_reserved_size(struct scsi_device *sdev, int __user *p)
+{
+ int size, err = get_user(size, p);
+
+ if (err)
+ return err;
+
+ if (size < 0)
+ return -EINVAL;
+
+ sdev->sg_reserved_size = min_t(unsigned int, size,
+ queue_max_bytes(sdev->request_queue));
+ return 0;
+}
+
+/*
+ * Always reports that we are ATAPI, even for a real SCSI drive; it is not
+ * clear this is worth doing anything about (why would you care?).
+ */
+static int sg_emulated_host(struct request_queue *q, int __user *p)
+{
+ return put_user(1, p);
+}
+
+static int scsi_get_idlun(struct scsi_device *sdev, void __user *argp)
+{
+ struct scsi_idlun v = {
+ .dev_id = (sdev->id & 0xff) +
+ ((sdev->lun & 0xff) << 8) +
+ ((sdev->channel & 0xff) << 16) +
+ ((sdev->host->host_no & 0xff) << 24),
+ .host_unique_id = sdev->host->unique_id
+ };
+ if (copy_to_user(argp, &v, sizeof(struct scsi_idlun)))
+ return -EFAULT;
+ return 0;
+}
+
+static int scsi_send_start_stop(struct scsi_device *sdev, int data)
+{
+ u8 cdb[MAX_COMMAND_SIZE] = { };
+
+ cdb[0] = START_STOP;
+ cdb[4] = data;
+ return ioctl_internal_command(sdev, cdb, START_STOP_TIMEOUT,
+ NORMAL_RETRIES);
+}
+
+/*
+ * Check if the given command is allowed.
+ *
+ * Only a subset of commands are allowed for unprivileged users. Commands used
+ * to format the media, update the firmware, etc. are not permitted.
+ */
+bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode)
+{
+ /* root can do any command. */
+ if (capable(CAP_SYS_RAWIO))
+ return true;
+
+ /* Anybody who can open the device can do a read-safe command */
+ switch (cmd[0]) {
+ /* Basic read-only commands */
+ case TEST_UNIT_READY:
+ case REQUEST_SENSE:
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case READ_BUFFER:
+ case READ_DEFECT_DATA:
+ case READ_CAPACITY: /* also GPCMD_READ_CDVD_CAPACITY */
+ case READ_LONG:
+ case INQUIRY:
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case LOG_SENSE:
+ case START_STOP:
+ case GPCMD_VERIFY_10:
+ case VERIFY_16:
+ case REPORT_LUNS:
+ case SERVICE_ACTION_IN_16:
+ case RECEIVE_DIAGNOSTIC:
+ case MAINTENANCE_IN: /* also GPCMD_SEND_KEY, which is a write command */
+ case GPCMD_READ_BUFFER_CAPACITY:
+ /* Audio CD commands */
+ case GPCMD_PLAY_CD:
+ case GPCMD_PLAY_AUDIO_10:
+ case GPCMD_PLAY_AUDIO_MSF:
+ case GPCMD_PLAY_AUDIO_TI:
+ case GPCMD_PAUSE_RESUME:
+ /* CD/DVD data reading */
+ case GPCMD_READ_CD:
+ case GPCMD_READ_CD_MSF:
+ case GPCMD_READ_DISC_INFO:
+ case GPCMD_READ_DVD_STRUCTURE:
+ case GPCMD_READ_HEADER:
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ case GPCMD_READ_SUBCHANNEL:
+ case GPCMD_READ_TOC_PMA_ATIP:
+ case GPCMD_REPORT_KEY:
+ case GPCMD_SCAN:
+ case GPCMD_GET_CONFIGURATION:
+ case GPCMD_READ_FORMAT_CAPACITIES:
+ case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
+ case GPCMD_GET_PERFORMANCE:
+ case GPCMD_SEEK:
+ case GPCMD_STOP_PLAY_SCAN:
+ /* ZBC */
+ case ZBC_IN:
+ return true;
+ /* Basic writing commands */
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_VERIFY:
+ case WRITE_12:
+ case WRITE_VERIFY_12:
+ case WRITE_16:
+ case WRITE_LONG:
+ case WRITE_LONG_2:
+ case WRITE_SAME:
+ case WRITE_SAME_16:
+ case WRITE_SAME_32:
+ case ERASE:
+ case GPCMD_MODE_SELECT_10:
+ case MODE_SELECT:
+ case LOG_SELECT:
+ case GPCMD_BLANK:
+ case GPCMD_CLOSE_TRACK:
+ case GPCMD_FLUSH_CACHE:
+ case GPCMD_FORMAT_UNIT:
+ case GPCMD_REPAIR_RZONE_TRACK:
+ case GPCMD_RESERVE_RZONE_TRACK:
+ case GPCMD_SEND_DVD_STRUCTURE:
+ case GPCMD_SEND_EVENT:
+ case GPCMD_SEND_OPC:
+ case GPCMD_SEND_CUE_SHEET:
+ case GPCMD_SET_SPEED:
+ case GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL:
+ case GPCMD_LOAD_UNLOAD:
+ case GPCMD_SET_STREAMING:
+ case GPCMD_SET_READ_AHEAD:
+ /* ZBC */
+ case ZBC_OUT:
+ return (mode & FMODE_WRITE);
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(scsi_cmd_allowed);
+
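
Call sites (scsi_bsg.c above, sg_io and sg_scsi_ioctl below) follow one pattern around this filter: copy in the CDB, then reject with -EPERM before building the request. A trivial illustrative sketch:

/* Illustrative gate: a read-only open may issue only the read-safe set. */
static int example_gate(unsigned char *cdb, fmode_t mode)
{
	if (!scsi_cmd_allowed(cdb, mode))
		return -EPERM;
	return 0;
}
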
+static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
+ struct sg_io_hdr *hdr, fmode_t mode)
+{
+ struct scsi_request *req = scsi_req(rq);
+
+ if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
+ if (!scsi_cmd_allowed(req->cmd, mode))
+ return -EPERM;
+
+ /*
+ * fill in request structure
+ */
+ req->cmd_len = hdr->cmd_len;
+
+ rq->timeout = msecs_to_jiffies(hdr->timeout);
+ if (!rq->timeout)
+ rq->timeout = sdev->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+ rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+ return 0;
+}
+
+static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
+ struct bio *bio)
+{
+ struct scsi_request *req = scsi_req(rq);
+ int r, ret = 0;
+
+ /*
+ * fill in all the output members
+ */
+ hdr->status = req->result & 0xff;
+ hdr->masked_status = status_byte(req->result);
+ hdr->msg_status = COMMAND_COMPLETE;
+ hdr->host_status = host_byte(req->result);
+ hdr->driver_status = 0;
+ if (scsi_status_is_check_condition(hdr->status))
+ hdr->driver_status = DRIVER_SENSE;
+ hdr->info = 0;
+ if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->resid = req->resid_len;
+ hdr->sb_len_wr = 0;
+
+ if (req->sense_len && hdr->sbp) {
+ int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
+
+ if (!copy_to_user(hdr->sbp, req->sense, len))
+ hdr->sb_len_wr = len;
+ else
+ ret = -EFAULT;
+ }
+
+ r = blk_rq_unmap_user(bio);
+ if (!ret)
+ ret = r;
+
+ return ret;
+}
+
+static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
+ struct sg_io_hdr *hdr, fmode_t mode)
+{
+ unsigned long start_time;
+ ssize_t ret = 0;
+ int writing = 0;
+ int at_head = 0;
+ struct request *rq;
+ struct scsi_request *req;
+ struct bio *bio;
+
+ if (hdr->interface_id != 'S')
+ return -EINVAL;
+
+ if (hdr->dxfer_len > (queue_max_hw_sectors(sdev->request_queue) << 9))
+ return -EIO;
+
+ if (hdr->dxfer_len)
+ switch (hdr->dxfer_direction) {
+ default:
+ return -EINVAL;
+ case SG_DXFER_TO_DEV:
+ writing = 1;
+ break;
+ case SG_DXFER_TO_FROM_DEV:
+ case SG_DXFER_FROM_DEV:
+ break;
+ }
+ if (hdr->flags & SG_FLAG_Q_AT_HEAD)
+ at_head = 1;
+
+ ret = -ENOMEM;
+ rq = blk_get_request(sdev->request_queue, writing ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ req = scsi_req(rq);
+
+ if (hdr->cmd_len > BLK_MAX_CDB) {
+ req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+ if (!req->cmd)
+ goto out_put_request;
+ }
+
+ ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode);
+ if (ret < 0)
+ goto out_free_cdb;
+
+ ret = 0;
+ if (hdr->iovec_count) {
+ struct iov_iter i;
+ struct iovec *iov = NULL;
+
+ ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
+ hdr->iovec_count, 0, &iov, &i);
+ if (ret < 0)
+ goto out_free_cdb;
+
+ /* SG_IO howto says that the shorter of the two wins */
+ iov_iter_truncate(&i, hdr->dxfer_len);
+
+ ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
+ kfree(iov);
+ } else if (hdr->dxfer_len)
+ ret = blk_rq_map_user(rq->q, rq, NULL, hdr->dxferp,
+ hdr->dxfer_len, GFP_KERNEL);
+
+ if (ret)
+ goto out_free_cdb;
+
+ bio = rq->bio;
+ req->retries = 0;
+
+ start_time = jiffies;
+
+ blk_execute_rq(disk, rq, at_head);
+
+ hdr->duration = jiffies_to_msecs(jiffies - start_time);
+
+ ret = scsi_complete_sghdr_rq(rq, hdr, bio);
+
+out_free_cdb:
+ scsi_req_free_cmd(req);
+out_put_request:
+ blk_put_request(rq);
+ return ret;
+}
+
+/**
+ * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
+ * @q: request queue to send scsi commands down
+ * @disk: gendisk to operate on (optional)
+ * @mode: mode used to open the file through which the ioctl has been
+ * submitted
+ * @sic: userspace structure describing the command to perform
+ *
+ * Send down the scsi command described by @sic to the device below
+ * the request queue @q. The @mode the file was opened with is used to
+ * perform fine-grained permission checks that allow users to send down
+ * non-destructive SCSI commands. If the caller has a struct gendisk
+ * available it should be passed in as @disk to allow the low level
+ * driver to use the information contained in it. A NULL @disk is only
+ * allowed if the caller knows that the low level driver doesn't
+ * need it (e.g. in the scsi subsystem).
+ *
+ * Notes:
+ * - This interface is deprecated - users should use the SG_IO
+ * interface instead, as this is a more flexible approach to
+ * performing SCSI commands on a device.
+ * - The SCSI command length is determined by examining the 1st byte
+ * of the given command. There is no way to override this.
+ * - Data transfers are limited to PAGE_SIZE
+ * - The length (x + y) must be at least OMAX_SB_LEN bytes long to
+ * accommodate the sense buffer when an error occurs.
+ * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ * old code will not be surprised.
+ * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ * a negative return and the Unix error code in 'errno'.
+ * If the SCSI command succeeds then 0 is returned.
+ * Positive numbers returned are the compacted SCSI error codes (4
+ * bytes in one int) where the lowest byte is the SCSI status.
+ */
+static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
+ fmode_t mode, struct scsi_ioctl_command __user *sic)
+{
+ enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
+ struct request *rq;
+ struct scsi_request *req;
+ int err;
+ unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ char *buffer = NULL;
+
+ if (!sic)
+ return -EINVAL;
+
+ /*
+ * get in and out lengths; verify they don't exceed a page worth of data
+ */
+ if (get_user(in_len, &sic->inlen))
+ return -EFAULT;
+ if (get_user(out_len, &sic->outlen))
+ return -EFAULT;
+ if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
+ return -EINVAL;
+ if (get_user(opcode, sic->data))
+ return -EFAULT;
+
+ bytes = max(in_len, out_len);
+ if (bytes) {
+ buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
+ if (!buffer)
+ return -ENOMEM;
+
+ }
+
+ rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto error_free_buffer;
+ }
+ req = scsi_req(rq);
+
+ cmdlen = COMMAND_SIZE(opcode);
+
+ /*
+ * get command and data to send to device, if any
+ */
+ err = -EFAULT;
+ req->cmd_len = cmdlen;
+ if (copy_from_user(req->cmd, sic->data, cmdlen))
+ goto error;
+
+ if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+ goto error;
+
+ err = -EPERM;
+ if (!scsi_cmd_allowed(req->cmd, mode))
+ goto error;
+
+ /* default; possibly overridden later */
+ req->retries = 5;
+
+ switch (opcode) {
+ case SEND_DIAGNOSTIC:
+ case FORMAT_UNIT:
+ rq->timeout = FORMAT_UNIT_TIMEOUT;
+ req->retries = 1;
+ break;
+ case START_STOP:
+ rq->timeout = START_STOP_TIMEOUT;
+ break;
+ case MOVE_MEDIUM:
+ rq->timeout = MOVE_MEDIUM_TIMEOUT;
+ break;
+ case READ_ELEMENT_STATUS:
+ rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
+ break;
+ case READ_DEFECT_DATA:
+ rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+ req->retries = 1;
+ break;
+ default:
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ break;
+ }
+
+ if (bytes) {
+ err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
+ if (err)
+ goto error;
+ }
+
+ blk_execute_rq(disk, rq, 0);
+
+ err = req->result & 0xff; /* only 8 bit SCSI status */
+ if (err) {
+ if (req->sense_len && req->sense) {
+ bytes = (OMAX_SB_LEN > req->sense_len) ?
+ req->sense_len : OMAX_SB_LEN;
+ if (copy_to_user(sic->data, req->sense, bytes))
+ err = -EFAULT;
+ }
+ } else {
+ if (copy_to_user(sic->data, buffer, out_len))
+ err = -EFAULT;
+ }
+
+error:
+ blk_put_request(rq);
+
+error_free_buffer:
+ kfree(buffer);
+
+ return err;
+}
+
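
For reference, a hedged sketch of the userspace shape of this deprecated interface as the kernel-doc above describes it: two length words, then the CDB (and any input data) in one buffer. Illustrative only; new code should use SG_IO:

#include <sys/ioctl.h>
#include <scsi/scsi_ioctl.h>	/* SCSI_IOCTL_SEND_COMMAND */

int legacy_test_unit_ready(int fd)
{
	struct {
		unsigned int inlen;	/* bytes of data sent after the CDB */
		unsigned int outlen;	/* bytes of data expected back */
		unsigned char data[6];	/* CDB, then in/out data */
	} cmd = { 0, 0, { 0x00, 0, 0, 0, 0, 0 } };	/* TEST UNIT READY */

	return ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &cmd);
}
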
+int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_sg_io_hdr hdr32 = {
+ .interface_id = hdr->interface_id,
+ .dxfer_direction = hdr->dxfer_direction,
+ .cmd_len = hdr->cmd_len,
+ .mx_sb_len = hdr->mx_sb_len,
+ .iovec_count = hdr->iovec_count,
+ .dxfer_len = hdr->dxfer_len,
+ .dxferp = (uintptr_t)hdr->dxferp,
+ .cmdp = (uintptr_t)hdr->cmdp,
+ .sbp = (uintptr_t)hdr->sbp,
+ .timeout = hdr->timeout,
+ .flags = hdr->flags,
+ .pack_id = hdr->pack_id,
+ .usr_ptr = (uintptr_t)hdr->usr_ptr,
+ .status = hdr->status,
+ .masked_status = hdr->masked_status,
+ .msg_status = hdr->msg_status,
+ .sb_len_wr = hdr->sb_len_wr,
+ .host_status = hdr->host_status,
+ .driver_status = hdr->driver_status,
+ .resid = hdr->resid,
+ .duration = hdr->duration,
+ .info = hdr->info,
+ };
+
+ if (copy_to_user(argp, &hdr32, sizeof(hdr32)))
+ return -EFAULT;
+
+ return 0;
+ }
+#endif
+
+ if (copy_to_user(argp, hdr, sizeof(*hdr)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(put_sg_io_hdr);
+
+int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+ struct compat_sg_io_hdr hdr32;
+
+ if (in_compat_syscall()) {
+ if (copy_from_user(&hdr32, argp, sizeof(hdr32)))
+ return -EFAULT;
+
+ *hdr = (struct sg_io_hdr) {
+ .interface_id = hdr32.interface_id,
+ .dxfer_direction = hdr32.dxfer_direction,
+ .cmd_len = hdr32.cmd_len,
+ .mx_sb_len = hdr32.mx_sb_len,
+ .iovec_count = hdr32.iovec_count,
+ .dxfer_len = hdr32.dxfer_len,
+ .dxferp = compat_ptr(hdr32.dxferp),
+ .cmdp = compat_ptr(hdr32.cmdp),
+ .sbp = compat_ptr(hdr32.sbp),
+ .timeout = hdr32.timeout,
+ .flags = hdr32.flags,
+ .pack_id = hdr32.pack_id,
+ .usr_ptr = compat_ptr(hdr32.usr_ptr),
+ .status = hdr32.status,
+ .masked_status = hdr32.masked_status,
+ .msg_status = hdr32.msg_status,
+ .sb_len_wr = hdr32.sb_len_wr,
+ .host_status = hdr32.host_status,
+ .driver_status = hdr32.driver_status,
+ .resid = hdr32.resid,
+ .duration = hdr32.duration,
+ .info = hdr32.info,
+ };
+
+ return 0;
+ }
+#endif
+
+ if (copy_from_user(hdr, argp, sizeof(*hdr)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(get_sg_io_hdr);
+
+#ifdef CONFIG_COMPAT
+struct compat_cdrom_generic_command {
+ unsigned char cmd[CDROM_PACKET_SIZE];
+ compat_caddr_t buffer;
+ compat_uint_t buflen;
+ compat_int_t stat;
+ compat_caddr_t sense;
+ unsigned char data_direction;
+ unsigned char pad[3];
+ compat_int_t quiet;
+ compat_int_t timeout;
+ compat_caddr_t unused;
+};
+#endif
+
+static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc,
+ const void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_cdrom_generic_command cgc32;
+
+ if (copy_from_user(&cgc32, arg, sizeof(cgc32)))
+ return -EFAULT;
+
+ *cgc = (struct cdrom_generic_command) {
+ .buffer = compat_ptr(cgc32.buffer),
+ .buflen = cgc32.buflen,
+ .stat = cgc32.stat,
+ .sense = compat_ptr(cgc32.sense),
+ .data_direction = cgc32.data_direction,
+ .quiet = cgc32.quiet,
+ .timeout = cgc32.timeout,
+ .unused = compat_ptr(cgc32.unused),
+ };
+ memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE);
+ return 0;
+ }
+#endif
+ if (copy_from_user(cgc, arg, sizeof(*cgc)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
+ void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_cdrom_generic_command cgc32 = {
+ .buffer = (uintptr_t)(cgc->buffer),
+ .buflen = cgc->buflen,
+ .stat = cgc->stat,
+ .sense = (uintptr_t)(cgc->sense),
+ .data_direction = cgc->data_direction,
+ .quiet = cgc->quiet,
+ .timeout = cgc->timeout,
+ .unused = (uintptr_t)(cgc->unused),
+ };
+ memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE);
+
+ if (copy_to_user(arg, &cgc32, sizeof(cgc32)))
+ return -EFAULT;
+
+ return 0;
+ }
+#endif
+ if (copy_to_user(arg, cgc, sizeof(*cgc)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk,
+ fmode_t mode, void __user *arg)
+{
+ struct cdrom_generic_command cgc;
+ struct sg_io_hdr hdr;
+ int err;
+
+ err = scsi_get_cdrom_generic_arg(&cgc, arg);
+ if (err)
+ return err;
+
+ cgc.timeout = clock_t_to_jiffies(cgc.timeout);
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.interface_id = 'S';
+ hdr.cmd_len = sizeof(cgc.cmd);
+ hdr.dxfer_len = cgc.buflen;
+ switch (cgc.data_direction) {
+ case CGC_DATA_UNKNOWN:
+ hdr.dxfer_direction = SG_DXFER_UNKNOWN;
+ break;
+ case CGC_DATA_WRITE:
+ hdr.dxfer_direction = SG_DXFER_TO_DEV;
+ break;
+ case CGC_DATA_READ:
+ hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+ break;
+ case CGC_DATA_NONE:
+ hdr.dxfer_direction = SG_DXFER_NONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hdr.dxferp = cgc.buffer;
+ hdr.sbp = cgc.sense;
+ if (hdr.sbp)
+ hdr.mx_sb_len = sizeof(struct request_sense);
+ hdr.timeout = jiffies_to_msecs(cgc.timeout);
+ hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
+ hdr.cmd_len = sizeof(cgc.cmd);
+
+ err = sg_io(sdev, disk, &hdr, mode);
+ if (err == -EFAULT)
+ return -EFAULT;
+
+ if (hdr.status)
+ return -EIO;
+
+ cgc.stat = err;
+ cgc.buflen = hdr.resid;
+ if (scsi_put_cdrom_generic_arg(&cgc, arg))
+ return -EFAULT;
+
+ return err;
+}
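As a usage sketch of the path just above: a data-less packet command enters through CDROM_SEND_PACKET, is converted by scsi_get_cdrom_generic_arg(), rewritten as an sg_io_hdr, and executed by sg_io(). The device node is an assumption and error handling is abbreviated:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int main(void)
{
	struct cdrom_generic_command cgc;
	int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	memset(&cgc, 0, sizeof(cgc));
	cgc.cmd[0] = GPCMD_TEST_UNIT_READY;	/* 12-byte packet CDB */
	cgc.data_direction = CGC_DATA_NONE;	/* maps to SG_DXFER_NONE */
	cgc.timeout = 500;	/* USER_HZ ticks; see clock_t_to_jiffies() above */
	if (ioctl(fd, CDROM_SEND_PACKET, &cgc))
		perror("CDROM_SEND_PACKET");
	return 0;
}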
+
+static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
+ fmode_t mode, void __user *argp)
+{
+ struct sg_io_hdr hdr;
+ int error;
+
+ error = get_sg_io_hdr(&hdr, argp);
+ if (error)
+ return error;
+ error = sg_io(sdev, disk, &hdr, mode);
+ if (error == -EFAULT)
+ return error;
+ if (put_sg_io_hdr(&hdr, argp))
+ return -EFAULT;
+ return 0;
+}
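scsi_ioctl_sg_io() is the kernel half of the classic SG_IO contract: get the header, run the command, write the header back. A minimal userspace counterpart issuing a 6-byte INQUIRY (device path and buffer sizes are illustrative; robust code must also check masked_status, host_status and driver_status):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char buf[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sg0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;	/* milliseconds */
	if (ioctl(fd, SG_IO, &hdr) || hdr.status)
		perror("SG_IO");
	else
		printf("vendor: %.8s\n", buf + 8);
	return 0;
}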
+
+/**
+ * scsi_ioctl - Dispatch ioctl to scsi device
+ * @sdev: scsi device receiving ioctl
+ * @disk: disk receiving the ioctl
+ * @mode: mode the block/char device is opened with
+ * @cmd: which ioctl is it
+ * @arg: data associated with ioctl
+ *
+ * Description: The scsi_ioctl() function differs from most ioctls in that it
+ * does not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a &struct scsi_device.
+ */
+int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
+ int cmd, void __user *arg)
+{
+ struct request_queue *q = sdev->request_queue;
struct scsi_sense_hdr sense_hdr;
/* Check for deprecated ioctls ... all the ioctls which don't
@@ -212,26 +912,34 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg
}
switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN: {
- struct scsi_idlun v = {
- .dev_id = (sdev->id & 0xff)
- + ((sdev->lun & 0xff) << 8)
- + ((sdev->channel & 0xff) << 16)
- + ((sdev->host->host_no & 0xff) << 24),
- .host_unique_id = sdev->host->unique_id
- };
- if (copy_to_user(arg, &v, sizeof(struct scsi_idlun)))
- return -EFAULT;
- return 0;
- }
+ case SG_GET_VERSION_NUM:
+ return sg_get_version(arg);
+ case SG_SET_TIMEOUT:
+ return sg_set_timeout(sdev, arg);
+ case SG_GET_TIMEOUT:
+ return jiffies_to_clock_t(sdev->sg_timeout);
+ case SG_GET_RESERVED_SIZE:
+ return sg_get_reserved_size(sdev, arg);
+ case SG_SET_RESERVED_SIZE:
+ return sg_set_reserved_size(sdev, arg);
+ case SG_EMULATED_HOST:
+ return sg_emulated_host(q, arg);
+ case SG_IO:
+ return scsi_ioctl_sg_io(sdev, disk, mode, arg);
+ case SCSI_IOCTL_SEND_COMMAND:
+ return sg_scsi_ioctl(q, disk, mode, arg);
+ case CDROM_SEND_PACKET:
+ return scsi_cdrom_send_packet(sdev, disk, mode, arg);
+ case CDROMCLOSETRAY:
+ return scsi_send_start_stop(sdev, 3);
+ case CDROMEJECT:
+ return scsi_send_start_stop(sdev, 2);
+ case SCSI_IOCTL_GET_IDLUN:
+ return scsi_get_idlun(sdev, arg);
case SCSI_IOCTL_GET_BUS_NUMBER:
return put_user(sdev->host->host_no, (int __user *)arg);
case SCSI_IOCTL_PROBE_HOST:
return ioctl_probe(sdev->host, arg);
- case SCSI_IOCTL_SEND_COMMAND:
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- return -EACCES;
- return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
case SCSI_IOCTL_DOORLOCK:
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
case SCSI_IOCTL_DOORUNLOCK:
@@ -240,66 +948,27 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg
return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
NORMAL_RETRIES, &sense_hdr);
case SCSI_IOCTL_START_UNIT:
- scsi_cmd[0] = START_STOP;
- scsi_cmd[1] = 0;
- scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
- scsi_cmd[4] = 1;
- return ioctl_internal_command(sdev, scsi_cmd,
- START_STOP_TIMEOUT, NORMAL_RETRIES);
+ return scsi_send_start_stop(sdev, 1);
case SCSI_IOCTL_STOP_UNIT:
- scsi_cmd[0] = START_STOP;
- scsi_cmd[1] = 0;
- scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
- scsi_cmd[4] = 0;
- return ioctl_internal_command(sdev, scsi_cmd,
- START_STOP_TIMEOUT, NORMAL_RETRIES);
+ return scsi_send_start_stop(sdev, 0);
case SCSI_IOCTL_GET_PCI:
return scsi_ioctl_get_pci(sdev, arg);
case SG_SCSI_RESET:
return scsi_ioctl_reset(sdev, arg);
}
- return -ENOIOCTLCMD;
-}
-
-/**
- * scsi_ioctl - Dispatch ioctl to scsi device
- * @sdev: scsi device receiving ioctl
- * @cmd: which ioctl is it
- * @arg: data associated with ioctl
- *
- * Description: The scsi_ioctl() function differs from most ioctls in that it
- * does not take a major/minor number as the dev field. Rather, it takes
- * a pointer to a &struct scsi_device.
- */
-int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
-{
- int ret = scsi_ioctl_common(sdev, cmd, arg);
-
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- if (sdev->host->hostt->ioctl)
- return sdev->host->hostt->ioctl(sdev, cmd, arg);
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(scsi_ioctl);
#ifdef CONFIG_COMPAT
-int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
-{
- int ret = scsi_ioctl_common(sdev, cmd, arg);
-
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- if (sdev->host->hostt->compat_ioctl)
+ if (in_compat_syscall()) {
+ if (!sdev->host->hostt->compat_ioctl)
+ return -EINVAL;
return sdev->host->hostt->compat_ioctl(sdev, cmd, arg);
-
- return ret;
-}
-EXPORT_SYMBOL(scsi_compat_ioctl);
+ }
#endif
+ if (!sdev->host->hostt->ioctl)
+ return -EINVAL;
+ return sdev->host->hostt->ioctl(sdev, cmd, arg);
+}
+EXPORT_SYMBOL(scsi_ioctl);
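The SCSI_IOCTL_GET_IDLUN packing moves into the scsi_get_idlun() helper but stays unchanged on the wire, as the removed open-coded block above shows: one byte each of id, lun, channel and host number. A sketch of the consumer side, with the struct and ioctl number mirrored locally since glibc does not ship them:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

#define SCSI_IOCTL_GET_IDLUN 0x5382	/* value from scsi/scsi_ioctl.h */

struct scsi_idlun {			/* mirrors the kernel layout */
	unsigned int dev_id;
	unsigned int host_unique_id;
};

int main(void)
{
	struct scsi_idlun v;
	int fd = open("/dev/sda", O_RDONLY);	/* illustrative path */

	if (fd < 0 || ioctl(fd, SCSI_IOCTL_GET_IDLUN, &v))
		return 1;
	printf("host %u channel %u id %u lun %u\n",
	       (v.dev_id >> 24) & 0xff, (v.dev_id >> 16) & 0xff,
	       v.dev_id & 0xff, (v.dev_id >> 8) & 0xff);
	return 0;
}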
/*
* We can process a reset even when a device isn't fully operable.
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7456a26aef51..9ba1aa7530a9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -119,13 +119,15 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
- if (cmd->request->rq_flags & RQF_DONTPREP) {
- cmd->request->rq_flags &= ~RQF_DONTPREP;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+
+ if (rq->rq_flags & RQF_DONTPREP) {
+ rq->rq_flags &= ~RQF_DONTPREP;
scsi_mq_uninit_cmd(cmd);
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(cmd->request, true);
+ blk_mq_requeue_request(rq, true);
}
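The recurring cmd->request to scsi_cmd_to_rq(cmd) conversion throughout this series works because blk-mq allocates the scsi_cmnd as the request's per-request payload, so the request can be recovered from the command without a stored back-pointer. A sketch of the helper these hunks assume (expected to live in scsi_cmnd.h; not code introduced here):

/* thin inverse of blk_mq_rq_to_pdu(): payload and request share one allocation */
static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
	return blk_mq_rq_from_pdu(scmd);
}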
/**
@@ -164,7 +166,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
*/
cmd->result = 0;
- blk_mq_requeue_request(cmd->request, true);
+ blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true);
}
/**
@@ -478,7 +480,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
- if (!blk_rq_is_passthrough(cmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
@@ -624,7 +626,7 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
unsigned long wait_for;
if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
@@ -643,7 +645,7 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
struct request_queue *q = cmd->device->request_queue;
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
@@ -818,7 +820,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
{
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
struct scsi_sense_hdr sshdr;
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -907,7 +909,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
struct request_queue *q = cmd->device->request_queue;
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;
if (unlikely(result)) /* a nz result may or may not be an error */
@@ -978,7 +980,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
struct scatterlist *last_sg = NULL;
blk_status_t ret;
@@ -1083,8 +1085,13 @@ EXPORT_SYMBOL(scsi_alloc_sgtables);
static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_request *req = &cmd->req;
+
+ memset(req->__cmd, 0, sizeof(req->__cmd));
+ req->cmd = req->__cmd;
+ req->cmd_len = BLK_MAX_CDB;
+ req->sense_len = 0;
- scsi_req_init(&cmd->req);
init_rcu_head(&cmd->rcu);
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
@@ -1107,7 +1114,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
void *buf = cmd->sense_buffer;
void *prot = cmd->prot_sdb;
- struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
unsigned long jiffies_at_alloc;
int retries, to_clear;
@@ -1533,7 +1540,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
scsi_init_command(sdev, cmd);
- cmd->request = req;
cmd->tag = req->tag;
cmd->prot_op = SCSI_PROT_NORMAL;
if (blk_rq_bytes(req))
@@ -1572,12 +1578,12 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
- if (unlikely(blk_should_fake_timeout(cmd->request->q)))
+ if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
return;
if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
return;
trace_scsi_dispatch_cmd_done(cmd);
- blk_mq_complete_request(cmd->request);
+ blk_mq_complete_request(scsi_cmd_to_rq(cmd));
}
static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index 2317717935e9..ed9572252a42 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -28,8 +28,9 @@ static void scsi_log_release_buffer(char *bufptr)
static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
- return scmd->request->rq_disk ?
- scmd->request->rq_disk->disk_name : NULL;
+ struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+
+ return rq->rq_disk ? rq->rq_disk->disk_name : NULL;
}
static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
@@ -91,7 +92,7 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
if (!logbuf)
return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
- scmd->request->tag);
+ scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag);
if (off < logbuf_len) {
va_start(args, fmt);
off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
@@ -188,7 +189,7 @@ void scsi_print_command(struct scsi_cmnd *cmd)
return;
off = sdev_format_header(logbuf, logbuf_len,
- scmd_name(cmd), cmd->request->tag);
+ scmd_name(cmd), scsi_cmd_to_rq(cmd)->tag);
if (off >= logbuf_len)
goto out_printk;
off += scnprintf(logbuf + off, logbuf_len - off, "CDB: ");
@@ -210,7 +211,7 @@ void scsi_print_command(struct scsi_cmnd *cmd)
off = sdev_format_header(logbuf, logbuf_len,
scmd_name(cmd),
- cmd->request->tag);
+ scsi_cmd_to_rq(cmd)->tag);
if (!WARN_ON(off > logbuf_len - 58)) {
off += scnprintf(logbuf + off, logbuf_len - off,
"CDB[%02x]: ", k);
@@ -373,7 +374,8 @@ EXPORT_SYMBOL(__scsi_print_sense);
/* Normalize and print sense buffer in SCSI command */
void scsi_print_sense(const struct scsi_cmnd *cmd)
{
- scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag,
+ scsi_log_print_sense(cmd->device, scmd_name(cmd),
+ scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag,
cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_print_sense);
@@ -391,8 +393,8 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
if (!logbuf)
return;
- off = sdev_format_header(logbuf, logbuf_len,
- scmd_name(cmd), cmd->request->tag);
+ off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd),
+ scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag);
if (off >= logbuf_len)
goto out_printk;
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index eae2235f79b5..6d9152031a40 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -7,6 +7,7 @@
#include <scsi/scsi_device.h>
#include <linux/sbitmap.h>
+struct bsg_device;
struct request_queue;
struct request;
struct scsi_cmnd;
@@ -180,6 +181,8 @@ static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev);
+
extern int scsi_device_max_queue_depth(struct scsi_device *sdev);
/*
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 5b6996a2401b..fe22191522a3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -267,6 +267,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
*/
sdev->borken = 1;
+ sdev->sg_reserved_size = INT_MAX;
+
q = blk_mq_init_queue(&sdev->host->tag_set);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
@@ -974,6 +976,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_UNMAP_LIMIT_WS)
sdev->unmap_limit_for_ws = 1;
+ if (*bflags & BLIST_IGN_MEDIA_CHANGE)
+ sdev->ignore_media_change = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ae9bfc658203..c3a710bceba0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -13,6 +13,7 @@
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
+#include <linux/bsg.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -1330,7 +1331,6 @@ static int scsi_target_add(struct scsi_target *starget)
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
int error, i;
- struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
error = scsi_target_add(starget);
@@ -1369,12 +1369,19 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
transport_add_device(&sdev->sdev_gendev);
sdev->is_visible = 1;
- error = bsg_scsi_register_queue(rq, &sdev->sdev_gendev);
- if (error)
- /* we're treating error on bsg register as non-fatal,
- * so pretend nothing went wrong */
- sdev_printk(KERN_INFO, sdev,
- "Failed to register bsg queue, errno=%d\n", error);
+ if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
+ sdev->bsg_dev = scsi_bsg_register_queue(sdev);
+	if (IS_ERR(sdev->bsg_dev)) {
+	error = PTR_ERR(sdev->bsg_dev);
+ /*
+ * We're treating error on bsg register as non-fatal, so
+ * pretend nothing went wrong.
+ */
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to register bsg queue, errno=%d\n",
+ error);
+ sdev->bsg_dev = NULL;
+ }
+ }
/* add additional host specific attributes */
if (sdev->host->hostt->sdev_attrs) {
@@ -1436,7 +1443,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
sysfs_remove_groups(&sdev->sdev_gendev.kobj,
sdev->host->hostt->sdev_groups);
- bsg_unregister_queue(sdev->request_queue);
+ if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev)
+ bsg_unregister_queue(sdev->bsg_dev);
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
device_del(dev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 49748cd817a5..60e406bcf42a 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3804,7 +3804,7 @@ bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd)
struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
- (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
+ (scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
set_host_byte(scmd, DID_TRANSPORT_MARGINAL);
return false;
}
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 5af7a10e9514..bd72c38d7bfc 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1230,7 +1230,7 @@ int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
{
if (cmd->flags & SCMD_TAGGED) {
*msg++ = SIMPLE_QUEUE_TAG;
- *msg++ = cmd->request->tag;
+ *msg++ = scsi_cmd_to_rq(cmd)->tag;
return 2;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6d2d63629a90..ac431b0477da 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -98,11 +98,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
-#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
#define SD_MINORS 16
-#else
-#define SD_MINORS 0
-#endif
static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
@@ -114,6 +110,7 @@ static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
+static int sd_resume_runtime(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
@@ -608,7 +605,7 @@ static const struct dev_pm_ops sd_pm_ops = {
.poweroff = sd_suspend_system,
.restore = sd_resume,
.runtime_suspend = sd_suspend_runtime,
- .runtime_resume = sd_resume,
+ .runtime_resume = sd_resume_runtime,
};
static struct scsi_driver sd_template = {
@@ -779,8 +776,9 @@ static unsigned int sd_prot_flag_mask(unsigned int prot_op)
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
- struct bio *bio = scmd->request->bio;
- unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ struct bio *bio = rq->bio;
+ unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
unsigned int protect = 0;
if (dix) { /* DIX Type 0, 1, 2, 3 */
@@ -871,7 +869,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -907,7 +905,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
bool unmap)
{
struct scsi_device *sdp = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -939,7 +937,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
bool unmap)
{
struct scsi_device *sdp = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -969,7 +967,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
@@ -1066,7 +1064,7 @@ out:
**/
static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
struct bio *bio = rq->bio;
@@ -1115,7 +1113,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
/* flush requests don't perform I/O, zero the S/G table */
@@ -1213,7 +1211,7 @@ static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
@@ -1327,7 +1325,7 @@ fail:
static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
switch (req_op(rq)) {
case REQ_OP_DISCARD:
@@ -1373,7 +1371,7 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
- struct request *rq = SCpnt->request;
+ struct request *rq = scsi_cmd_to_rq(SCpnt);
u8 *cmnd;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
@@ -1533,11 +1531,11 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
/**
- * sd_ioctl_common - process an ioctl
+ * sd_ioctl - process an ioctl
* @bdev: target block device
* @mode: FMODE_* mask
* @cmd: ioctl command number
- * @p: this is third argument given to ioctl(2) system call.
+ * @arg: the third argument given to the ioctl(2) system call.
* Often contains a pointer.
*
* Returns 0 if successful (some ioctls return positive numbers on
@@ -1546,20 +1544,20 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* Note: most ioctls are forwarded to the block subsystem or further
* down in the scsi subsystem.
**/
-static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, void __user *p)
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
+ void __user *p = (void __user *)arg;
int error;
SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
"cmd=0x%x\n", disk->disk_name, cmd));
- error = scsi_verify_blk_ioctl(bdev, cmd);
- if (error < 0)
- return error;
+ if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
+ return -ENOIOCTLCMD;
/*
* If we are in the middle of error recovery, don't let anyone
@@ -1570,27 +1568,11 @@ static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
(mode & FMODE_NDELAY) != 0);
if (error)
- goto out;
+ return error;
if (is_sed_ioctl(cmd))
return sed_ioctl(sdkp->opal_dev, cmd, p);
-
- /*
- * Send SCSI addressing ioctls directly to mid level, send other
- * ioctls to block level and then onto mid level if they can't be
- * resolved.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- error = scsi_ioctl(sdp, cmd, p);
- break;
- default:
- error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
- break;
- }
-out:
- return error;
+ return scsi_ioctl(sdp, disk, mode, cmd, p);
}
static void set_media_not_present(struct scsi_disk *sdkp)
@@ -1773,34 +1755,6 @@ static void sd_rescan(struct device *dev)
sd_revalidate_disk(sdkp->disk);
}
-static int sd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- void __user *p = (void __user *)arg;
- int ret;
-
- ret = sd_ioctl_common(bdev, mode, cmd, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
-}
-
-#ifdef CONFIG_COMPAT
-static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- void __user *p = compat_ptr(arg);
- int ret;
-
- ret = sd_ioctl_common(bdev, mode, cmd, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
-}
-#endif
-
static char sd_pr_type(enum pr_type type)
{
switch (type) {
@@ -1901,9 +1855,7 @@ static const struct block_device_operations sd_fops = {
.release = sd_release,
.ioctl = sd_ioctl,
.getgeo = sd_getgeo,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sd_compat_ioctl,
-#endif
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = sd_check_events,
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
@@ -1924,7 +1876,7 @@ static const struct block_device_operations sd_fops = {
**/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
- struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
/* New SCSI EH run, reset gate variable */
sdkp->ignore_medium_access_errors = false;
@@ -1944,7 +1896,7 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
**/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
- struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
struct scsi_device *sdev = scmd->device;
if (!scsi_device_online(sdev) ||
@@ -1985,7 +1937,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
- struct request *req = scmd->request;
+ struct request *req = scsi_cmd_to_rq(scmd);
struct scsi_device *sdev = scmd->device;
unsigned int transferred, good_bytes;
u64 start_lba, end_lba, bad_lba;
@@ -2040,8 +1992,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned int sector_size = SCpnt->device->sector_size;
unsigned int resid;
struct scsi_sense_hdr sshdr;
- struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
- struct request *req = SCpnt->request;
+ struct request *req = scsi_cmd_to_rq(SCpnt);
+ struct scsi_disk *sdkp = scsi_disk(req->rq_disk);
int sense_valid = 0;
int sense_deferred = 0;
@@ -3720,6 +3672,25 @@ static int sd_resume(struct device *dev)
return ret;
}
+static int sd_resume_runtime(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ if (sdp->ignore_media_change) {
+ /* clear the device's sense data */
+ static const u8 cmd[10] = { REQUEST_SENSE };
+
+ if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL,
+ NULL, sdp->request_queue->rq_timeout, 1, 0,
+ RQF_PM, NULL))
+ sd_printk(KERN_NOTICE, sdkp,
+ "Failed to clear sense data\n");
+ }
+
+ return sd_resume(dev);
+}
+
/**
* init_sd - entry point for this driver (both when built in or when
* a module).
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 186b5ff52c3a..b9757f24b0d6 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -243,7 +243,7 @@ out:
static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t sector = blk_rq_pos(rq);
@@ -321,7 +321,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
unsigned int nr_blocks)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
unsigned int wp_offset, zno = blk_rq_zone_no(rq);
unsigned long flags;
@@ -386,7 +386,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
sector_t sector = blk_rq_pos(rq);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t block = sectors_to_logical(sdkp->device, sector);
@@ -442,7 +442,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
unsigned int good_bytes)
{
int result = cmd->result;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
unsigned int zno = blk_rq_zone_no(rq);
enum req_opf op = req_op(rq);
@@ -516,7 +516,7 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr)
{
int result = cmd->result;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
if (op_is_zone_mgmt(req_op(rq)) &&
result &&
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 91e2221bbb0d..9be76deea242 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -238,8 +238,9 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
-
- return blk_verify_command(cmd, filp->f_mode);
+ if (!scsi_cmd_allowed(cmd, filp->f_mode))
+ return -EPERM;
+ return 0;
}
static int
@@ -1108,7 +1109,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
- return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
+ return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
@@ -1165,28 +1166,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
if (ret != -ENOIOCTLCMD)
return ret;
-
- return scsi_ioctl(sdp->device, cmd_in, p);
-}
-
-#ifdef CONFIG_COMPAT
-static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
-{
- void __user *p = compat_ptr(arg);
- Sg_device *sdp;
- Sg_fd *sfp;
- int ret;
-
- if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
- return -ENXIO;
-
- ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- return scsi_compat_ioctl(sdp->device, cmd_in, p);
+ return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
}
-#endif
static __poll_t
sg_poll(struct file *filp, poll_table * wait)
@@ -1441,9 +1422,7 @@ static const struct file_operations sg_fops = {
.write = sg_write,
.poll = sg_poll,
.unlocked_ioctl = sg_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sg_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = sg_open,
.mmap = sg_mmap,
.release = sg_release,
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index cb9e4e968b60..6f83e2df4d64 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,7 +1,7 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
@@ -38,14 +38,14 @@
# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
config SCSI_SMARTPQI
- tristate "Microsemi PQI Driver"
+ tristate "Microchip PQI Driver"
depends on PCI && SCSI && !S390
select SCSI_SAS_ATTRS
select RAID_ATTRS
help
- This driver supports Microsemi PQI controllers.
+ This driver supports Microchip PQI controllers.
- <http://www.microsemi.com>
+ <http://www.microchip.com>
To compile this driver as a module, choose M here: the
module will be called smartpqi.
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index d7dac5572274..f340afc011b5 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -59,7 +59,7 @@ struct pqi_device_registers {
/*
* controller registers
*
- * These are defined by the Microsemi implementation.
+ * These are defined by the Microchip implementation.
*
* Some registers (those named sis_*) are only used when in
* legacy SIS mode before we transition the controller into
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index dcc0b9618a64..d95498ff136a 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,13 +33,13 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.8-045"
+#define DRIVER_VERSION "2.1.10-020"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 8
-#define DRIVER_REVISION 45
+#define DRIVER_RELEASE 10
+#define DRIVER_REVISION 20
-#define DRIVER_NAME "Microsemi PQI Driver (v" \
+#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT "smartpqi"
@@ -48,8 +48,8 @@
#define PQI_POST_RESET_DELAY_SECS 5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
-MODULE_AUTHOR("Microsemi");
-MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
+MODULE_AUTHOR("Microchip");
+MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
@@ -5568,7 +5568,7 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
{
u16 hw_queue;
- hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+ hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
if (hw_queue > ctrl_info->max_hw_queue_index)
hw_queue = 0;
@@ -5577,7 +5577,7 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
{
- if (blk_rq_is_passthrough(scmd->request))
+ if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
return false;
return scmd->SCp.this_residual == 0;
@@ -6033,8 +6033,10 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
mutex_lock(&ctrl_info->lun_reset_mutex);
dev_err(&ctrl_info->pci_dev->dev,
- "resetting scsi %d:%d:%d:%d\n",
- shost->host_no, device->bus, device->target, device->lun);
+ "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
+ shost->host_no,
+ device->bus, device->target, device->lun,
+ scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
@@ -7758,11 +7760,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
pqi_init_operational_queues(ctrl_info);
- rc = pqi_request_irqs(ctrl_info);
+ rc = pqi_create_queues(ctrl_info);
if (rc)
return rc;
- rc = pqi_create_queues(ctrl_info);
+ rc = pqi_request_irqs(ctrl_info);
if (rc)
return rc;
@@ -8451,7 +8453,7 @@ static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
if (id->driver_data)
ctrl_description = (char *)id->driver_data;
else
- ctrl_description = "Microsemi Smart Family Controller";
+ ctrl_description = "Microchip Smart Family Controller";
dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
@@ -8713,6 +8715,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1108)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1109)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x8460)
},
{
@@ -9173,6 +9183,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1dfc, 0x3161)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5445)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5446)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5447)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b27)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b29)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b45)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index dd628cc87f78..afd9bafebd1d 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index c954620628e0..d63c46a8e38b 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 12cd2ab1aead..d29c1352a826 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 6dd0ff188bb4..95740caa1eb0 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -33,7 +33,7 @@
#include "snic_io.h"
#include "snic.h"
-#define snic_cmd_tag(sc) (((struct scsi_cmnd *) sc)->request->tag)
+#define snic_cmd_tag(sc) (scsi_cmd_to_rq(sc)->tag)
const char *snic_state_str[] = {
[SNIC_INIT] = "SNIC_INIT",
@@ -1636,7 +1636,7 @@ snic_abort_cmd(struct scsi_cmnd *sc)
u32 start_time = jiffies;
SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
- sc, sc->cmnd[0], sc->request, tag);
+ sc, sc->cmnd[0], scsi_cmd_to_rq(sc), tag);
if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
SNIC_HOST_ERR(snic->shost,
@@ -2152,7 +2152,7 @@ snic_device_reset(struct scsi_cmnd *sc)
int dr_supp = 0;
SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
- sc, sc->cmnd[0], sc->request,
+ sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
snic_cmd_tag(sc));
dr_supp = snic_dev_reset_supported(sc->device);
if (!dr_supp) {
@@ -2383,11 +2383,11 @@ snic_host_reset(struct scsi_cmnd *sc)
{
struct Scsi_Host *shost = sc->device->host;
u32 start_time = jiffies;
- int ret = FAILED;
+ int ret;
SNIC_SCSI_DBG(shost,
"host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
- sc, sc->cmnd[0], sc->request,
+ sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
snic_cmd_tag(sc), CMD_FLAGS(sc));
ret = snic_reset(shost, sc);
@@ -2494,7 +2494,7 @@ cleanup:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
SNIC_HOST_INFO(snic->shost,
"sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
- sc, sc->request->tag, CMD_FLAGS(sc), rqi,
+ sc, scsi_cmd_to_rq(sc)->tag, CMD_FLAGS(sc), rqi,
jiffies_to_msecs(jiffies - st_time));
/* Update IO stats */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index a6d3ac0a6cbc..d65dbd189514 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -120,6 +120,8 @@ static void get_capabilities(struct scsi_cd *);
static unsigned int sr_check_events(struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
static int sr_packet(struct cdrom_device_info *, struct packet_command *);
+static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nr, u8 *last_sense);
static const struct cdrom_device_ops sr_dops = {
.open = sr_open,
@@ -133,8 +135,9 @@ static const struct cdrom_device_ops sr_dops = {
.get_mcn = sr_get_mcn,
.reset = sr_reset,
.audio_ioctl = sr_audio_ioctl,
- .capability = SR_CAPABILITIES,
.generic_packet = sr_packet,
+ .read_cdda_bpc = sr_read_cdda_bpc,
+ .capability = SR_CAPABILITIES,
};
static void sr_kref_release(struct kref *kref);
@@ -328,7 +331,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
int good_bytes = (result == 0 ? this_count : 0);
int block_sectors = 0;
long error_sector;
- struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
+ struct request *rq = scsi_cmd_to_rq(SCpnt);
+ struct scsi_cd *cd = scsi_cd(rq->rq_disk);
#ifdef DEBUG
scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);
@@ -350,16 +354,14 @@ static int sr_done(struct scsi_cmnd *SCpnt)
break;
error_sector =
get_unaligned_be32(&SCpnt->sense_buffer[3]);
- if (SCpnt->request->bio != NULL)
- block_sectors =
- bio_sectors(SCpnt->request->bio);
+ if (rq->bio != NULL)
+ block_sectors = bio_sectors(rq->bio);
if (block_sectors < 4)
block_sectors = 4;
if (cd->device->sector_size == 2048)
error_sector <<= 2;
error_sector &= ~(block_sectors - 1);
- good_bytes = (error_sector -
- blk_rq_pos(SCpnt->request)) << 9;
+ good_bytes = (error_sector - blk_rq_pos(rq)) << 9;
if (good_bytes < 0 || good_bytes >= this_count)
good_bytes = 0;
/*
@@ -391,7 +393,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
{
int block = 0, this_count, s_size;
struct scsi_cd *cd;
- struct request *rq = SCpnt->request;
+ struct request *rq = scsi_cmd_to_rq(SCpnt);
blk_status_t ret;
ret = scsi_alloc_sgtables(SCpnt);
@@ -556,53 +558,14 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode)
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
- struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct gendisk *disk = bdev->bd_disk;
+ struct scsi_cd *cd = scsi_cd(disk);
struct scsi_device *sdev = cd->device;
void __user *argp = (void __user *)arg;
int ret;
- mutex_lock(&cd->lock);
-
- ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
- (mode & FMODE_NDELAY) != 0);
- if (ret)
- goto out;
-
- scsi_autopm_get_device(sdev);
-
- /*
- * Send SCSI addressing ioctls directly to mid level, send other
- * ioctls to cdrom/block level.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- ret = scsi_ioctl(sdev, cmd, argp);
- goto put;
- }
-
- ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
- if (ret != -ENOSYS)
- goto put;
-
- ret = scsi_ioctl(sdev, cmd, argp);
-
-put:
- scsi_autopm_put_device(sdev);
-
-out:
- mutex_unlock(&cd->lock);
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
- unsigned long arg)
-{
- struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
- struct scsi_device *sdev = cd->device;
- void __user *argp = compat_ptr(arg);
- int ret;
+ if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
+ return -ENOIOCTLCMD;
mutex_lock(&cd->lock);
@@ -613,32 +576,19 @@ static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsign
scsi_autopm_get_device(sdev);
- /*
- * Send SCSI addressing ioctls directly to mid level, send other
- * ioctls to cdrom/block level.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- ret = scsi_compat_ioctl(sdev, cmd, argp);
- goto put;
+	if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
+ if (ret != -ENOSYS)
+ goto put;
}
-
- ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp);
- if (ret != -ENOSYS)
- goto put;
-
- ret = scsi_compat_ioctl(sdev, cmd, argp);
+ ret = scsi_ioctl(sdev, disk, mode, cmd, argp);
put:
scsi_autopm_put_device(sdev);
-
out:
mutex_unlock(&cd->lock);
return ret;
-
}
-#endif
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
@@ -663,9 +613,7 @@ static const struct block_device_operations sr_bdops =
.open = sr_block_open,
.release = sr_block_release,
.ioctl = sr_block_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sr_block_compat_ioctl,
-#endif
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = sr_block_check_events,
};
@@ -1005,6 +953,57 @@ static int sr_packet(struct cdrom_device_info *cdi,
return cgc->stat;
}
+static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nr, u8 *last_sense)
+{
+ struct gendisk *disk = cdi->disk;
+ u32 len = nr * CD_FRAMESIZE_RAW;
+ struct scsi_request *req;
+ struct request *rq;
+ struct bio *bio;
+ int ret;
+
+ rq = blk_get_request(disk->queue, REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ req = scsi_req(rq);
+
+ ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL);
+ if (ret)
+ goto out_put_request;
+
+ req->cmd[0] = GPCMD_READ_CD;
+ req->cmd[1] = 1 << 2;
+ req->cmd[2] = (lba >> 24) & 0xff;
+ req->cmd[3] = (lba >> 16) & 0xff;
+ req->cmd[4] = (lba >> 8) & 0xff;
+ req->cmd[5] = lba & 0xff;
+ req->cmd[6] = (nr >> 16) & 0xff;
+ req->cmd[7] = (nr >> 8) & 0xff;
+ req->cmd[8] = nr & 0xff;
+ req->cmd[9] = 0xf8;
+ req->cmd_len = 12;
+ rq->timeout = 60 * HZ;
+ bio = rq->bio;
+
+ blk_execute_rq(disk, rq, 0);
+ if (scsi_req(rq)->result) {
+ struct scsi_sense_hdr sshdr;
+
+ scsi_normalize_sense(req->sense, req->sense_len,
+ &sshdr);
+ *last_sense = sshdr.sense_key;
+ ret = -EIO;
+ }
+
+ if (blk_rq_unmap_user(bio))
+ ret = -EFAULT;
+out_put_request:
+ blk_put_request(rq);
+ return ret;
+}
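sr_read_cdda_bpc() is reached via cdrom_ioctl()'s CDROMREADAUDIO handling; each frame is CD_FRAMESIZE_RAW (2352) bytes, and the CDB built above is READ CD (GPCMD_READ_CD, 0xbe) with raw-data flags 0xf8. An illustrative caller reading one frame, assuming /dev/sr0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int main(void)
{
	unsigned char frame[CD_FRAMESIZE_RAW];
	struct cdrom_read_audio ra = {
		.addr_format = CDROM_LBA,
		.addr.lba = 0,
		.nframes = 1,
		.buf = frame,
	};
	int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	if (ioctl(fd, CDROMREADAUDIO, &ra))
		perror("CDROMREADAUDIO");
	return 0;
}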
+
/**
* sr_kref_release - Called to free the scsi_cd structure
* @kref: pointer to embedded kref
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c6f14540ae03..2d1b0594af69 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3499,8 +3499,9 @@ out:
/* The ioctl command */
-static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p)
+static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
+ void __user *p = (void __user *)arg;
int i, cmd_nr, cmd_type, bt;
int retval = 0;
unsigned int blk;
@@ -3820,73 +3821,44 @@ static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user
goto out;
}
mutex_unlock(&STp->lock);
- switch (cmd_in) {
- case SCSI_IOCTL_STOP_UNIT:
- /* unload */
- retval = scsi_ioctl(STp->device, cmd_in, p);
- if (!retval) {
- STp->rew_at_close = 0;
- STp->ready = ST_NO_TAPE;
- }
- return retval;
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- break;
+ switch (cmd_in) {
+ case SG_IO:
+ case SCSI_IOCTL_SEND_COMMAND:
+ case CDROM_SEND_PACKET:
+ if (!capable(CAP_SYS_RAWIO))
+	return -EPERM;
+	break;
+ default:
+ break;
+ }
- default:
- if ((cmd_in == SG_IO ||
- cmd_in == SCSI_IOCTL_SEND_COMMAND ||
- cmd_in == CDROM_SEND_PACKET) &&
- !capable(CAP_SYS_RAWIO))
- i = -EPERM;
- else
- i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
- file->f_mode, cmd_in, p);
- if (i != -ENOTTY)
- return i;
- break;
+ retval = scsi_ioctl(STp->device, STp->disk, file->f_mode, cmd_in, p);
+ if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
+ /* unload */
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
}
- return -ENOTTY;
+ return retval;
out:
mutex_unlock(&STp->lock);
return retval;
}
-static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
-{
- void __user *p = (void __user *)arg;
- struct scsi_tape *STp = file->private_data;
- int ret;
-
- ret = st_ioctl_common(file, cmd_in, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_ioctl(STp->device, cmd_in, p);
-}
-
#ifdef CONFIG_COMPAT
static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
- void __user *p = compat_ptr(arg);
- struct scsi_tape *STp = file->private_data;
- int ret;
-
/* argument conversion is handled using put_user_mtpos/put_user_mtget */
switch (cmd_in) {
case MTIOCPOS32:
- return st_ioctl_common(file, MTIOCPOS, p);
+ cmd_in = MTIOCPOS;
+ break;
case MTIOCGET32:
- return st_ioctl_common(file, MTIOCGET, p);
+ cmd_in = MTIOCGET;
+ break;
}
- ret = st_ioctl_common(file, cmd_in, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_compat_ioctl(STp->device, cmd_in, p);
+ return st_ioctl(file, cmd_in, arg);
}
#endif
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 491b435273a6..f1ba7f5b52a8 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -540,7 +540,7 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
msg_h = (struct st_msg_header *)req - 1;
if (likely(cmd)) {
msg_h->channel = (u8)cmd->device->channel;
- msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
}
addr = hba->dma_handle + hba->req_head * hba->rq_size;
addr += (hba->ccb[tag].sg_count+4)/11;
@@ -690,7 +690,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
cmd->scsi_done = done;
- tag = cmd->request->tag;
+ tag = scsi_cmd_to_rq(cmd)->tag;
if (unlikely(tag >= host->can_queue))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1246,7 +1246,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct st_hba *hba = (struct st_hba *)host->hostdata;
- u16 tag = cmd->request->tag;
+ u16 tag = scsi_cmd_to_rq(cmd)->tag;
void __iomem *base;
u32 data;
int result = SUCCESS;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 328bb961c281..e2278b0125e7 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -710,7 +710,7 @@ static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
* Cannot return an ID of 0, which is reserved for an unsolicited
* message from Hyper-V.
*/
- return (u64)blk_mq_unique_tag(request->cmd->request) + 1;
+ return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1;
}
static void handle_sc_creation(struct vmbus_channel *new_sc)
@@ -1202,7 +1202,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS)
storvsc_log(device, STORVSC_LOGGING_ERROR,
"tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
- request->cmd->request->tag,
+ scsi_cmd_to_rq(request->cmd)->tag,
stor_pkt->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,
vstor_packet->vm_srb.srb_status,
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 2e3fbc2fae97..f7f724a3ff1d 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -336,7 +336,7 @@ static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
{
int wanted_len = cmd->SCp.this_residual;
- if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(cmd->request))
+ if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
return 0;
return wanted_len;
@@ -366,8 +366,9 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
}
/* clean up after our dma is done */
-static int sun3scsi_dma_finish(int write_flag)
+static int sun3scsi_dma_finish(enum dma_data_direction data_dir)
{
+ const bool write_flag = data_dir == DMA_TO_DEVICE;
unsigned short __maybe_unused count;
unsigned short fifo;
int ret = 0;
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 16b65fc4405c..6d0b07b9cb31 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -500,8 +500,8 @@ static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
* Shorten our settle_time if needed for
* this command not to time out.
*/
- if (np->s.settle_time_valid && cmd->request->timeout) {
- unsigned long tlimit = jiffies + cmd->request->timeout;
+ if (np->s.settle_time_valid && scsi_cmd_to_rq(cmd)->timeout) {
+ unsigned long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout;
tlimit -= SYM_CONF_TIMER_INTERVAL*2;
if (time_after(np->s.settle_time, tlimit)) {
np->s.settle_time = tlimit;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 2d137953e7b4..432df76e6318 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -183,3 +183,19 @@ config SCSI_UFS_CRYPTO
Enabling this makes it possible for the kernel to use the crypto
capabilities of the UFS device (if present) to perform crypto
operations on data being transferred to/from the device.
+
+config SCSI_UFS_HPB
+ bool "Support UFS Host Performance Booster"
+ depends on SCSI_UFSHCD
+ help
+	  The UFS HPB feature improves random read performance. It caches the
+	  L2P (logical-to-physical) map of the UFS device in host DRAM. The
+	  driver issues HPB READ commands that piggyback the physical page
+	  number, bypassing the L2P address translation in the device's FTL
+	  (flash translation layer).
+
+config SCSI_UFS_FAULT_INJECTION
+ bool "UFS Fault Injection Support"
+ depends on SCSI_UFSHCD && FAULT_INJECTION
+ help
+ Enable fault injection support in the UFS driver. This makes it easier
+ to test the UFS error handler and abort handler.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 06f3a3fe4a44..c407da9b5171 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -8,6 +8,8 @@ ufshcd-core-y += ufshcd.o ufs-sysfs.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
+ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 908ff39c4856..7da8be2f35c4 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -318,11 +318,8 @@ static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
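The dev_pm_ops conversions repeated throughout this series rely on SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS() compiling away when CONFIG_PM_SLEEP/CONFIG_PM are disabled, and on ufshcd_system_suspend()/ufshcd_system_resume() now taking a struct device * (see the ufshcd.c hunks near the end), which removes the need for per-driver dev_get_drvdata() wrappers. A trimmed sketch of the macros from include/linux/pm.h (the real sleep macro also fills in the freeze/thaw/poweroff/restore callbacks):

	#ifdef CONFIG_PM_SLEEP
	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
		.suspend = suspend_fn, \
		.resume = resume_fn,
	#else
	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
	#endif

	#ifdef CONFIG_PM
	#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
		.runtime_suspend = suspend_fn, \
		.runtime_resume = resume_fn, \
		.runtime_idle = idle_fn,
	#else
	#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
	#endif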
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index ec4589afbc13..679289e1a78e 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -23,31 +23,6 @@ static int tc_type = TC_G210_INV;
module_param(tc_type, int, 0);
MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)");
-static int tc_dwc_g210_pci_suspend(struct device *dev)
-{
- return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_resume(struct device *dev)
-{
- return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_suspend(struct device *dev)
-{
- return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_resume(struct device *dev)
-{
- return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_idle(struct device *dev)
-{
- return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-
/*
* struct ufs_hba_dwc_vops - UFS DWC specific variant operations
*/
@@ -143,11 +118,8 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
- .suspend = tc_dwc_g210_pci_suspend,
- .resume = tc_dwc_g210_pci_resume,
- .runtime_suspend = tc_dwc_g210_pci_runtime_suspend,
- .runtime_resume = tc_dwc_g210_pci_runtime_resume,
- .runtime_idle = tc_dwc_g210_pci_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
index a1268e4f44d6..783ec43efa78 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
@@ -84,11 +84,8 @@ static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
};
static struct platform_driver tc_dwc_g210_pltfm_driver = {
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index cf46d6f86e0e..5aa096e5b6cc 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -1287,11 +1287,8 @@ static const struct of_device_id exynos_ufs_of_match[] = {
};
static const struct dev_pm_ops exynos_ufs_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-fault-injection.c b/drivers/scsi/ufs/ufs-fault-injection.c
new file mode 100644
index 000000000000..7ac7c4e7ff83
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-fault-injection.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+#include <linux/fault-inject.h>
+#include <linux/module.h>
+#include "ufs-fault-injection.h"
+
+static int ufs_fault_get(char *buffer, const struct kernel_param *kp);
+static int ufs_fault_set(const char *val, const struct kernel_param *kp);
+
+static const struct kernel_param_ops ufs_fault_ops = {
+ .get = ufs_fault_get,
+ .set = ufs_fault_set,
+};
+
+enum { FAULT_INJ_STR_SIZE = 80 };
+
+/*
+ * For more details about fault injection, please refer to
+ * Documentation/fault-injection/fault-injection.rst.
+ */
+static char g_trigger_eh_str[FAULT_INJ_STR_SIZE];
+module_param_cb(trigger_eh, &ufs_fault_ops, g_trigger_eh_str, 0644);
+MODULE_PARM_DESC(trigger_eh,
+ "Fault injection. trigger_eh=<interval>,<probability>,<space>,<times>");
+static DECLARE_FAULT_ATTR(ufs_trigger_eh_attr);
+
+static char g_timeout_str[FAULT_INJ_STR_SIZE];
+module_param_cb(timeout, &ufs_fault_ops, g_timeout_str, 0644);
+MODULE_PARM_DESC(timeout,
+ "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
+static DECLARE_FAULT_ATTR(ufs_timeout_attr);
+
+static int ufs_fault_get(char *buffer, const struct kernel_param *kp)
+{
+ const char *fault_str = kp->arg;
+
+ return sysfs_emit(buffer, "%s\n", fault_str);
+}
+
+static int ufs_fault_set(const char *val, const struct kernel_param *kp)
+{
+ struct fault_attr *attr = NULL;
+
+ if (kp->arg == g_trigger_eh_str)
+ attr = &ufs_trigger_eh_attr;
+ else if (kp->arg == g_timeout_str)
+ attr = &ufs_timeout_attr;
+
+ if (WARN_ON_ONCE(!attr))
+ return -EINVAL;
+
+ if (!setup_fault_attr(attr, (char *)val))
+ return -EINVAL;
+
+ strlcpy(kp->arg, val, FAULT_INJ_STR_SIZE);
+
+ return 0;
+}
+
+bool ufs_trigger_eh(void)
+{
+ return should_fail(&ufs_trigger_eh_attr, 1);
+}
+
+bool ufs_fail_completion(void)
+{
+ return should_fail(&ufs_timeout_attr, 1);
+}
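The four comma-separated values accepted by these parameters follow Documentation/fault-injection/fault-injection.rst. A hypothetical configuration, assuming the parameters end up under the ufshcd_core module:

	/*
	 * Illustrative only:
	 *
	 *   echo "1,100,0,-1" > /sys/module/ufshcd_core/parameters/trigger_eh
	 *
	 * interval=1, probability=100, space=0, times=-1 (unlimited), so
	 * should_fail() returns true on every call and every queued command
	 * schedules the SCSI error handler (see the ufshcd_queuecommand()
	 * hunk further below).
	 */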
diff --git a/drivers/scsi/ufs/ufs-fault-injection.h b/drivers/scsi/ufs/ufs-fault-injection.h
new file mode 100644
index 000000000000..6d0cd8e10c87
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-fault-injection.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _UFS_FAULT_INJECTION_H
+#define _UFS_FAULT_INJECTION_H
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
+bool ufs_trigger_eh(void);
+bool ufs_fail_completion(void);
+#else
+static inline bool ufs_trigger_eh(void)
+{
+ return false;
+}
+
+static inline bool ufs_fail_completion(void)
+{
+ return false;
+}
+#endif
+
+#endif /* _UFS_FAULT_INJECTION_H */
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 5b147a48161b..6b706de8354b 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -572,11 +572,8 @@ static int ufs_hisi_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops ufs_hisi_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index d2c251628a05..80b3545dd17d 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -1140,11 +1140,8 @@ static int ufs_mtk_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops ufs_mtk_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9b1d18d7c9bb..9d9770f1db4f 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1546,11 +1546,8 @@ MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif
static const struct dev_pm_ops ufs_qcom_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 52bd807f7940..5c405ff7b6ea 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -604,6 +604,8 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
+UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2);
+UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
@@ -636,6 +638,8 @@ static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_number_of_secure_wpa.attr,
&dev_attr_psa_max_data_size.attr,
&dev_attr_psa_state_timeout.attr,
+ &dev_attr_hpb_version.attr,
+ &dev_attr_hpb_control.attr,
&dev_attr_ext_feature_sup.attr,
&dev_attr_wb_presv_us_en.attr,
&dev_attr_wb_type.attr,
@@ -709,6 +713,10 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
_ENM4_CAP_ADJ_FCTR, 2);
+UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1);
+UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1);
+UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1);
+UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
@@ -746,6 +754,10 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
&dev_attr_enh4_memory_max_alloc_units.attr,
&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
+ &dev_attr_hpb_region_size.attr,
+ &dev_attr_hpb_number_lu.attr,
+ &dev_attr_hpb_subregion_size.attr,
+ &dev_attr_hpb_max_active_regions.attr,
&dev_attr_wb_max_alloc_units.attr,
&dev_attr_wb_max_wb_luns.attr,
&dev_attr_wb_buff_cap_adj.attr,
@@ -1006,6 +1018,7 @@ UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
UFS_FLAG(wb_enable, _WB_EN);
UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
+UFS_FLAG(hpb_enable, _HPB_EN);
static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_device_init.attr,
@@ -1019,6 +1032,7 @@ static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_wb_enable.attr,
&dev_attr_wb_flush_en.attr,
&dev_attr_wb_flush_during_h8.attr,
+ &dev_attr_hpb_enable.attr,
NULL,
};
@@ -1065,6 +1079,7 @@ out: \
static DEVICE_ATTR_RO(_name)
UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
+UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
@@ -1088,6 +1103,7 @@ UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
+ &dev_attr_max_data_size_hpb_single_cmd.attr,
&dev_attr_current_power_mode.attr,
&dev_attr_active_icc_level.attr,
&dev_attr_ooo_data_enabled.attr,
@@ -1147,6 +1163,7 @@ static DEVICE_ATTR_RO(_pname)
#define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \
UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)
+UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1);
UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
@@ -1160,10 +1177,13 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
+UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2);
+UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2);
+UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
-
static struct attribute *ufs_sysfs_unit_descriptor[] = {
+ &dev_attr_lu_enable.attr,
&dev_attr_boot_lun_id.attr,
&dev_attr_lun_write_protect.attr,
&dev_attr_lun_queue_depth.attr,
@@ -1177,6 +1197,9 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_physical_memory_resourse_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
+ &dev_attr_hpb_lu_max_active_regions.attr,
+ &dev_attr_hpb_pinned_region_start_offset.attr,
+ &dev_attr_hpb_number_pinned_regions.attr,
&dev_attr_wb_buf_alloc_units.attr,
NULL,
};
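Each UFS_*_DESC_PARAM() line above expands to a read-only sysfs attribute whose show() callback reads one descriptor field. A simplified sketch of what the macro generates for one of the new entries (not the exact macro body; ufs-sysfs.c also handles endianness and descriptor length checks):

	static ssize_t hpb_version_show(struct device *dev,
					struct device_attribute *attr, char *buf)
	{
		struct ufs_hba *hba = dev_get_drvdata(dev);

		/* Read 2 bytes at DEVICE_DESC_PARAM_HPB_VER from the device
		 * descriptor and format them for sysfs. */
		return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
						 DEVICE_DESC_PARAM_HPB_VER,
						 buf, 2);
	}
	static DEVICE_ATTR_RO(hpb_version);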
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index cb80b9670bfe..8c6b38b1b142 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -122,12 +122,14 @@ enum flag_idn {
QUERY_FLAG_IDN_WB_EN = 0x0E,
QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
+ QUERY_FLAG_IDN_HPB_RESET = 0x11,
+ QUERY_FLAG_IDN_HPB_EN = 0x12,
};
/* Attribute idn for Query requests */
enum attr_idn {
QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
- QUERY_ATTR_IDN_RESERVED = 0x01,
+ QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
QUERY_ATTR_IDN_POWER_MODE = 0x02,
QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
@@ -195,6 +197,9 @@ enum unit_desc_param {
UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23,
+ UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25,
+ UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27,
UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
};
@@ -235,6 +240,8 @@ enum device_desc_param {
DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
DEVICE_DESC_PARAM_PSA_TMT = 0x29,
DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ DEVICE_DESC_PARAM_HPB_VER = 0x40,
+ DEVICE_DESC_PARAM_HPB_CONTROL = 0x42,
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
DEVICE_DESC_PARAM_WB_TYPE = 0x54,
@@ -283,6 +290,10 @@ enum geometry_desc_param {
GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48,
+ GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49,
+ GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A,
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
@@ -327,8 +338,10 @@ enum {
/* Possible values for dExtendedUFSFeaturesSupport */
enum {
+ UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
};
+#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
@@ -466,6 +479,41 @@ struct utp_cmd_rsp {
u8 sense_data[UFS_SENSE_SIZE];
};
+struct ufshpb_active_field {
+ __be16 active_rgn;
+ __be16 active_srgn;
+};
+#define HPB_ACT_FIELD_SIZE 4
+
+/**
+ * struct utp_hpb_rsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved1: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @desc_type: Descriptor type of sense data
+ * @additional_len: Additional length of sense data
+ * @hpb_op: HPB operation type
+ * @lun: LUN of response UPIU
+ * @active_rgn_cnt: Active region count
+ * @inactive_rgn_cnt: Inactive region count
+ * @hpb_active_field: HPB regions and subregions recommended for activation
+ * @hpb_inactive_field: HPB regions and subregions to be inactivated
+ */
+struct utp_hpb_rsp {
+ __be32 residual_transfer_count;
+ __be32 reserved1[4];
+ __be16 sense_data_len;
+ u8 desc_type;
+ u8 additional_len;
+ u8 hpb_op;
+ u8 lun;
+ u8 active_rgn_cnt;
+ u8 inactive_rgn_cnt;
+ struct ufshpb_active_field hpb_active_field[2];
+ __be16 hpb_inactive_field[2];
+};
+#define UTP_HPB_RSP_SIZE 40
+
/**
* struct utp_upiu_rsp - general upiu response structure
* @header: UPIU header structure DW-0 to DW-2
@@ -476,6 +524,7 @@ struct utp_upiu_rsp {
struct utp_upiu_header header;
union {
struct utp_cmd_rsp sr;
+ struct utp_hpb_rsp hr;
struct utp_upiu_query qr;
};
};
@@ -544,6 +593,9 @@ struct ufs_dev_info {
u16 wspecversion;
u32 clk_gating_wait_us;
+ /* UFS HPB related flag */
+ bool hpb_enabled;
+
/* UFS WB related flags */
bool wb_enabled;
bool wb_buf_flush_enabled;
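For illustration, a hedged sketch of decoding the new utp_hpb_rsp fields; the real consumer is ufshpb_rsp_upiu() in the ufshpb.c file this series adds:

	static void example_decode_hpb_rsp(const struct utp_hpb_rsp *rsp)
	{
		int i;

		/* All multi-byte fields arrive big-endian from the device. */
		for (i = 0; i < rsp->active_rgn_cnt && i < 2; i++)
			pr_debug("activate region %u subregion %u\n",
				 be16_to_cpu(rsp->hpb_active_field[i].active_rgn),
				 be16_to_cpu(rsp->hpb_active_field[i].active_srgn));

		for (i = 0; i < rsp->inactive_rgn_cnt && i < 2; i++)
			pr_debug("inactivate region %u\n",
				 be16_to_cpu(rsp->hpb_inactive_field[i]));
	}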
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 07f559ac5883..35ec9ea79869 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -116,4 +116,10 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
+/*
+ * Some UFS devices require that the L2P entry be swapped before it is sent
+ * to the UFS device in an HPB READ command.
+ */
+#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
+
#endif /* UFS_QUIRKS_H_ */
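A minimal sketch of how such a byte-order quirk is typically applied (hypothetical snippet; the real check lives in the new ufshpb code):

	/* Illustrative only. */
	if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
		ppn = swab64(ppn);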
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index e6c334bfb4c2..b3bcc5c882da 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -385,48 +385,6 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
-#ifdef CONFIG_PM_SLEEP
-/**
- * ufshcd_pci_suspend - suspend power management function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_suspend(struct device *dev)
-{
- return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-
-/**
- * ufshcd_pci_resume - resume power management function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_resume(struct device *dev)
-{
- return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-
-#endif /* !CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM
-static int ufshcd_pci_runtime_suspend(struct device *dev)
-{
- return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-static int ufshcd_pci_runtime_resume(struct device *dev)
-{
- return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-static int ufshcd_pci_runtime_idle(struct device *dev)
-{
- return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-#endif /* !CONFIG_PM */
-
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
* @pdev: pointer to PCI device handle
@@ -510,10 +468,8 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
- ufshcd_pci_runtime_resume,
- ufshcd_pci_runtime_idle)
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, ufshcd_pci_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 298e22ef907e..8859c13f4e09 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -170,53 +170,6 @@ out:
return err;
}
-#ifdef CONFIG_PM
-/**
- * ufshcd_pltfrm_suspend - suspend power management function
- * @dev: pointer to device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-int ufshcd_pltfrm_suspend(struct device *dev)
-{
- return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_suspend);
-
-/**
- * ufshcd_pltfrm_resume - resume power management function
- * @dev: pointer to device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-int ufshcd_pltfrm_resume(struct device *dev)
-{
- return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_resume);
-
-int ufshcd_pltfrm_runtime_suspend(struct device *dev)
-{
- return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_suspend);
-
-int ufshcd_pltfrm_runtime_resume(struct device *dev)
-{
- return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_resume);
-
-int ufshcd_pltfrm_runtime_idle(struct device *dev)
-{
- return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_idle);
-
-#endif /* CONFIG_PM */
-
void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
{
ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
index 772a8e848098..c33e28ac6ef6 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.h
@@ -33,22 +33,4 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
-#ifdef CONFIG_PM
-
-int ufshcd_pltfrm_suspend(struct device *dev);
-int ufshcd_pltfrm_resume(struct device *dev);
-int ufshcd_pltfrm_runtime_suspend(struct device *dev);
-int ufshcd_pltfrm_runtime_resume(struct device *dev);
-int ufshcd_pltfrm_runtime_idle(struct device *dev);
-
-#else /* !CONFIG_PM */
-
-#define ufshcd_pltfrm_suspend NULL
-#define ufshcd_pltfrm_resume NULL
-#define ufshcd_pltfrm_runtime_suspend NULL
-#define ufshcd_pltfrm_runtime_resume NULL
-#define ufshcd_pltfrm_runtime_idle NULL
-
-#endif /* CONFIG_PM */
-
#endif /* UFSHCD_PLTFRM_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 708b3b62fc4d..a3b419848f0a 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -17,15 +17,18 @@
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <scsi/scsi_driver.h>
+#include <scsi/scsi_transport.h>
+#include "../scsi_transport_api.h"
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
+#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
+#include "ufshpb.h"
#include <asm/unaligned.h>
-#include "../sd.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -128,15 +131,6 @@ enum {
UFSHCD_CAN_QUEUE = 32,
};
-/* UFSHCD states */
-enum {
- UFSHCD_STATE_RESET,
- UFSHCD_STATE_ERROR,
- UFSHCD_STATE_OPERATIONAL,
- UFSHCD_STATE_EH_SCHEDULED_FATAL,
- UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
-};
-
/* UFSHCD error handling flags */
enum {
UFSHCD_EH_IN_PROGRESS = (1 << 0),
@@ -205,7 +199,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+ UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
@@ -242,7 +237,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
-static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -253,11 +247,6 @@ static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
-static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
-{
- return tag >= 0 && tag < hba->nutrs;
-}
-
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
if (!hba->is_irq_enabled) {
@@ -371,26 +360,24 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
- u64 lba = -1;
+ u64 lba;
u8 opcode = 0, group_id = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
+ struct request *rq = scsi_cmd_to_rq(cmd);
int transfer_len = -1;
if (!cmd)
return;
- if (!trace_ufshcd_command_enabled()) {
- /* trace UPIU W/O tracing command */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
- return;
- }
-
/* trace UPIU also */
ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ if (!trace_ufshcd_command_enabled())
+ return;
+
opcode = cmd->cmnd[0];
- lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
+ lba = scsi_get_lba(cmd);
if (opcode == READ_10 || opcode == WRITE_10) {
/*
@@ -404,7 +391,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
/*
* The number of Bytes to be unmapped beginning with the lba.
*/
- transfer_len = blk_rq_bytes(cmd->request);
+ transfer_len = blk_rq_bytes(rq);
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -758,16 +745,6 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
}
/**
- * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
- * @hba: per adapter instance
- * @tag: position of the bit to be cleared
- */
-static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
-{
- clear_bit(tag, &hba->outstanding_reqs);
-}
-
-/**
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
@@ -2078,7 +2055,7 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- struct request *req = lrbp->cmd->request;
+ struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
@@ -2112,27 +2089,22 @@ static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ unsigned long flags;
lrbp->issue_time_stamp = ktime_get();
lrbp->compl_time_stamp = ktime_set(0, 0);
- ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
- if (ufshcd_has_utrlcnr(hba)) {
- set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag,
- REG_UTP_TRANSFER_REQ_DOOR_BELL);
- } else {
- unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
- set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag,
- REG_UTP_TRANSFER_REQ_DOOR_BELL);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ if (hba->vops && hba->vops->setup_xfer_req)
+ hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
/* Make sure that doorbell is committed immediately */
wmb();
}
@@ -2247,15 +2219,15 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
}
/**
- * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
+ * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the Unipro layer
* @hba: per adapter instance
* @uic_cmd: UIC command
- *
- * Mutex must be held.
*/
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+
WARN_ON(hba->active_uic_cmd);
hba->active_uic_cmd = uic_cmd;
@@ -2273,11 +2245,10 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
}
/**
- * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
+ * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Must be called with mutex held.
* Returns 0 only if success.
*/
static int
@@ -2286,6 +2257,8 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
int ret;
unsigned long flags;
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+
if (wait_for_completion_timeout(&uic_cmd->done,
msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
@@ -2315,14 +2288,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @uic_cmd: UIC command
* @completion: initialize the completion only if this is set to true
*
- * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
- * with mutex held and host_lock locked.
* Returns 0 only if success.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
bool completion)
{
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+ lockdep_assert_held(hba->host->host_lock);
+
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
"Controller not ready to accept UIC commands\n");
@@ -2701,20 +2675,12 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
*/
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
+ struct ufs_hba *hba = shost_priv(host);
+ int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp;
- struct ufs_hba *hba;
- int tag;
int err = 0;
- hba = shost_priv(host);
-
- tag = cmd->request->tag;
- if (!ufshcd_valid_tag(hba, tag)) {
- dev_err(hba->dev,
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
- __func__, tag, cmd, cmd->request);
- BUG();
- }
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -2748,12 +2714,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
goto out;
- default:
- dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
- __func__, hba->ufshcd_state);
- set_host_byte(cmd, DID_BAD_TARGET);
- cmd->scsi_done(cmd);
- goto out;
}
hba->req_abort_count = 0;
@@ -2766,15 +2726,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
(hba->clk_gating.state != CLKS_ON));
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- if (hba->pm_op_in_progress)
- set_host_byte(cmd, DID_BAD_TARGET);
- else
- err = SCSI_MLQUEUE_HOST_BUSY;
- ufshcd_release(hba);
- goto out;
- }
-
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
@@ -2784,10 +2735,17 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
- ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
+ ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
lrbp->req_abort_skip = false;
+ err = ufshpb_prep(hba, lrbp);
+ if (err == -EAGAIN) {
+ lrbp->cmd = NULL;
+ ufshcd_release(hba);
+ goto out;
+ }
+
ufshcd_comp_scsi_upiu(hba, lrbp);
err = ufshcd_map_sg(hba, lrbp);
@@ -2796,12 +2754,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_release(hba);
goto out;
}
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
ufshcd_send_command(hba, tag);
out:
up_read(&hba->clk_scaling_lock);
+
+ if (ufs_trigger_eh())
+ scsi_schedule_eh(hba->host);
+
return err;
}
@@ -2907,8 +2867,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
@@ -2930,7 +2888,9 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
* we also need to clear the outstanding_request
* field in hba
*/
- ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
}
return err;
@@ -2949,11 +2909,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
struct request_queue *q = hba->cmd_queue;
+ DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
struct ufshcd_lrb *lrbp;
int err;
int tag;
- struct completion wait;
down_read(&hba->clk_scaling_lock);
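DECLARE_COMPLETION_ONSTACK() initializes the completion at its declaration, which is why the separate init_completion() call disappears from this function below; roughly:

	/*
	 * DECLARE_COMPLETION_ONSTACK(wait) is roughly equivalent to:
	 *	struct completion wait;
	 *	init_completion(&wait);
	 * (plus lockdep bookkeeping for on-stack use).
	 */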
@@ -2968,17 +2928,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
goto out_unlock;
}
tag = req->tag;
- WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
/* Set the timeout such that the SCSI error handler is not activated. */
req->timeout = msecs_to_jiffies(2 * timeout);
blk_mq_start_request(req);
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- err = -EBUSY;
- goto out;
- }
-
- init_completion(&wait);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
@@ -2988,8 +2942,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
ufshcd_send_command(hba, tag);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
@@ -3194,7 +3146,7 @@ out_unlock:
*
* Returns 0 for success, non-zero in case of failure
*/
-static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
u32 *attr_val)
{
@@ -3419,9 +3371,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
if (is_kmalloc) {
/* Make sure we don't copy more data than available */
- if (param_offset + param_size > buff_len)
- param_size = buff_len - param_offset;
- memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+ if (param_offset >= buff_len)
+ ret = -EINVAL;
+ else
+ memcpy(param_read_buf, &desc_buf[param_offset],
+ min_t(u32, param_size, buff_len - param_offset));
}
out:
if (is_kmalloc)
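A worked example of the bounds fix above, with assumed values:

	/*
	 * Assume buff_len = 64. With param_offset = 80, the old clamp
	 * computed buff_len - param_offset, which wrapped around in the
	 * unsigned subtraction and let memcpy() read past desc_buf; the
	 * new code returns -EINVAL instead. With param_offset = 60 and
	 * param_size = 16, both versions copy
	 * min_t(u32, 16, 64 - 60) = 4 bytes.
	 */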
@@ -3965,6 +3919,35 @@ out:
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+ lockdep_assert_held(hba->host->host_lock);
+
+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+static void ufshcd_schedule_eh(struct ufs_hba *hba)
+{
+ bool schedule_eh = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /* handle fatal errors only when link is not in error state */
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba))
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+ else
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+ schedule_eh = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (schedule_eh)
+ scsi_schedule_eh(hba->host);
+}
+
/**
* ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
* state) and waits for it to take effect.
@@ -3983,14 +3966,14 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
*/
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
- struct completion uic_async_done;
+ DECLARE_COMPLETION_ONSTACK(uic_async_done);
unsigned long flags;
+ bool schedule_eh = false;
u8 status;
int ret;
bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
- init_completion(&uic_async_done);
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4055,10 +4038,14 @@ out:
ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
if (ret) {
ufshcd_set_link_broken(hba);
- ufshcd_schedule_eh_work(hba);
+ schedule_eh = true;
}
+
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (schedule_eh)
+ ufshcd_schedule_eh(hba);
mutex_unlock(&hba->uic_cmd_mutex);
return ret;
@@ -4987,6 +4974,26 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
return scsi_change_queue_depth(sdev, depth);
}
+static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ /* skip well-known LU */
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
+ return;
+
+ ufshpb_destroy_lu(hba, sdev);
+}
+
+static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ /* skip well-known LU */
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
+ return;
+
+ ufshpb_init_hpb_lu(hba, sdev);
+}
+
/**
* ufshcd_slave_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
@@ -4996,6 +5003,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
+ ufshcd_hpb_configure(hba, sdev);
+
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
@@ -5020,15 +5029,37 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
+ unsigned long flags;
hba = shost_priv(sdev->host);
+
+ ufshcd_hpb_destroy(hba, sdev);
+
/* Drop the reference as it won't be needed anymore */
if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
- unsigned long flags;
-
spin_lock_irqsave(hba->host->host_lock, flags);
hba->sdev_ufs_device = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ } else if (hba->sdev_ufs_device) {
+ struct device *supplier = NULL;
+
+ /* Ensure UFS Device WLUN exists and does not disappear */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->sdev_ufs_device) {
+ supplier = &hba->sdev_ufs_device->sdev_gendev;
+ get_device(supplier);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (supplier) {
+ /*
+ * If a LUN fails to probe (e.g. absent BOOT WLUN), the
+ * device will not have been registered but can still
+ * have a device link holding a reference to the device.
+ */
+ device_link_remove(&sdev->sdev_gendev, supplier);
+ put_device(supplier);
+ }
}
}
@@ -5124,6 +5155,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
/* Flushed in suspend */
schedule_work(&hba->eeh_work);
+
+ if (scsi_status == SAM_STAT_GOOD)
+ ufshpb_rsp_upiu(hba, lrbp);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -5232,10 +5266,12 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
- * @completed_reqs: requests to complete
+ * @completed_reqs: bitmask that indicates which requests to complete
+ * @retry_requests: whether to ask the SCSI core to retry completed requests
*/
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
+ unsigned long completed_reqs,
+ bool retry_requests)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
@@ -5244,8 +5280,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
- if (!test_and_clear_bit(index, &hba->outstanding_reqs))
- continue;
lrbp = &hba->lrb[index];
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
@@ -5253,7 +5287,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- result = ufshcd_transfer_rsp_status(hba, lrbp);
+ result = retry_requests ? DID_BUS_BUSY << 16 :
+ ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
/* Mark completed command as NULL in LRB */
@@ -5277,17 +5312,19 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
}
/**
- * ufshcd_trc_handler - handle transfer requests completion
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
- * @use_utrlcnr: get completed requests from UTRLCNR
+ * @retry_requests: whether to ask the SCSI core to retry completed requests
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ bool retry_requests)
{
- unsigned long completed_reqs = 0;
+ unsigned long completed_reqs, flags;
+ u32 tr_doorbell;
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
@@ -5300,27 +5337,21 @@ static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
- if (use_utrlcnr) {
- u32 utrlcnr;
+ if (ufs_fail_completion())
+ return IRQ_HANDLED;
- utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
- if (utrlcnr) {
- ufshcd_writel(hba, utrlcnr,
- REG_UTP_TRANSFER_REQ_LIST_COMPL);
- completed_reqs = utrlcnr;
- }
- } else {
- unsigned long flags;
- u32 tr_doorbell;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
+ WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
+ "completed: %#lx; outstanding: %#lx\n", completed_reqs,
+ hba->outstanding_reqs);
+ hba->outstanding_reqs &= ~completed_reqs;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ __ufshcd_transfer_req_compl(hba, completed_reqs,
+ retry_requests);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
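A worked example of the new completion mask, with assumed register values:

	/*
	 * Assume outstanding_reqs = 0xb (tags 0, 1 and 3 issued) and
	 * tr_doorbell = 0x8 (tag 3 still owned by the controller):
	 *
	 *   completed_reqs = ~0x8 & 0xb = 0x3
	 *
	 * so tags 0 and 1 are completed here and outstanding_reqs is
	 * reduced to 0x8, all under outstanding_lock.
	 */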
@@ -5799,7 +5830,13 @@ out:
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_trc_handler(hba, false);
+ ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
+ ufshcd_tmc_handler(hba);
+}
+
+static void ufshcd_retry_aborted_requests(struct ufs_hba *hba)
+{
+ ufshcd_transfer_req_compl(hba, /*retry_requests=*/true);
ufshcd_tmc_handler(hba);
}
@@ -5874,27 +5911,6 @@ out:
return err_handling;
}
-/* host lock must be held before calling this func */
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
- return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
- (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-/* host lock must be held before calling this func */
-static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
-{
- /* handle fatal errors only when link is not in error state */
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
- if (hba->force_reset || ufshcd_is_link_broken(hba) ||
- ufshcd_is_saved_err_fatal(hba))
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
- else
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
- queue_work(hba->eh_wq, &hba->eh_work);
- }
-}
-
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
down_write(&hba->clk_scaling_lock);
@@ -6028,11 +6044,11 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
- * @work: pointer to work structure
+ * @host: SCSI host pointer
*/
-static void ufshcd_err_handler(struct work_struct *work)
+static void ufshcd_err_handler(struct Scsi_Host *host)
{
- struct ufs_hba *hba;
+ struct ufs_hba *hba = shost_priv(host);
unsigned long flags;
bool err_xfer = false;
bool err_tm = false;
@@ -6040,10 +6056,9 @@ static void ufshcd_err_handler(struct work_struct *work)
int tag;
bool needs_reset = false, needs_restore = false;
- hba = container_of(work, struct ufs_hba, eh_work);
-
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->host->host_eh_scheduled = 0;
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -6141,8 +6156,7 @@ static void ufshcd_err_handler(struct work_struct *work)
}
lock_skip_pending_xfer_clear:
- /* Complete the requests that are cleared by s/w */
- ufshcd_complete_requests(hba);
+ ufshcd_retry_aborted_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = false;
@@ -6357,7 +6371,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
"host_regs: ");
ufshcd_print_pwr_info(hba);
}
- ufshcd_schedule_eh_work(hba);
retval |= IRQ_HANDLED;
}
/*
@@ -6369,6 +6382,10 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
hba->errors = 0;
hba->uic_error = 0;
spin_unlock(hba->host->host_lock);
+
+ if (queue_eh_work)
+ ufshcd_schedule_eh(hba);
+
return retval;
}
@@ -6440,7 +6457,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
+ retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
return retval;
}
@@ -6548,9 +6565,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
/* send command to the controller */
__set_bit(task_tag, &hba->outstanding_tasks);
- /* Make sure descriptors are ready before ringing the task doorbell */
- wmb();
-
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
@@ -6663,11 +6677,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op)
{
struct request_queue *q = hba->cmd_queue;
+ DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
struct ufshcd_lrb *lrbp;
int err = 0;
int tag;
- struct completion wait;
u8 upiu_flags;
down_read(&hba->clk_scaling_lock);
@@ -6678,14 +6692,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
goto out_unlock;
}
tag = req->tag;
- WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
- init_completion(&wait);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
@@ -6723,8 +6736,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
ufshcd_send_command(hba, tag);
/*
@@ -6865,7 +6876,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
- __ufshcd_transfer_req_compl(hba, pos);
+ __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
}
}
@@ -6981,33 +6992,24 @@ out:
*/
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
- struct Scsi_Host *host;
- struct ufs_hba *hba;
+ struct Scsi_Host *host = cmd->device->host;
+ struct ufs_hba *hba = shost_priv(host);
+ int tag = scsi_cmd_to_rq(cmd)->tag;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
unsigned long flags;
- unsigned int tag;
- int err = 0;
- struct ufshcd_lrb *lrbp;
+ int err = FAILED;
u32 reg;
- host = cmd->device->host;
- hba = shost_priv(host);
- tag = cmd->request->tag;
- lrbp = &hba->lrb[tag];
- if (!ufshcd_valid_tag(hba, tag)) {
- dev_err(hba->dev,
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
- __func__, tag, cmd, cmd->request);
- BUG();
- }
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
ufshcd_hold(hba, false);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- /* If command is already aborted/completed, return SUCCESS */
+ /* If command is already aborted/completed, return FAILED. */
if (!(test_bit(tag, &hba->outstanding_reqs))) {
dev_err(hba->dev,
"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
__func__, tag, hba->outstanding_reqs, reg);
- goto out;
+ goto release;
}
/* Print Transfer Request of aborted task */
@@ -7036,7 +7038,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
- goto cleanup;
+ __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false);
+ goto release;
}
/*
@@ -7045,40 +7048,39 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* will be to send LU reset which, again, is a spec violation.
* To avoid these unnecessary/illegal steps, first we clean up
* the lrb taken by this cmd and re-set it in outstanding_reqs,
- * then queue the eh_work and bail.
+ * then queue the error handler and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
- __ufshcd_transfer_req_compl(hba, (1UL << tag));
- set_bit(tag, &hba->outstanding_reqs);
+
spin_lock_irqsave(host->host_lock, flags);
hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
- goto out;
+
+ ufshcd_schedule_eh(hba);
+
+ goto release;
}
/* Skip task abort in case previous aborts failed and report failure */
- if (lrbp->req_abort_skip)
- err = -EIO;
- else
- err = ufshcd_try_to_abort_task(hba, tag);
+ if (lrbp->req_abort_skip) {
+ dev_err(hba->dev, "%s: skipping abort\n", __func__);
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
+ goto release;
+ }
- if (!err) {
-cleanup:
- __ufshcd_transfer_req_compl(hba, (1UL << tag));
-out:
- err = SUCCESS;
- } else {
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (err) {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
err = FAILED;
+ goto release;
}
- /*
- * This ufshcd_release() corresponds to the original scsi cmd that got
- * aborted here (as we won't get any IRQ for it).
- */
+ err = SUCCESS;
+
+release:
+ /* Matches the ufshcd_hold() call at the start of this function. */
ufshcd_release(hba);
return err;
}
@@ -7101,9 +7103,10 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
+ ufshpb_reset_host(hba);
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
- ufshcd_complete_requests(hba);
+ ufshcd_retry_aborted_requests(hba);
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
@@ -7188,11 +7191,10 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->eh_work);
+ ufshcd_err_handler(hba->host);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
@@ -7499,6 +7501,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
u8 model_index;
+ u8 b_ufs_feature_sup;
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -7526,9 +7529,26 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+ if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
+ (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
+ bool hpb_en = false;
+
+ ufshpb_get_dev_info(hba, desc_buf);
+
+ if (!ufshpb_is_legacy(hba))
+ err = ufshcd_query_flag_retry(hba,
+ UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_HPB_EN, 0,
+ &hpb_en);
+
+ if (ufshpb_is_legacy(hba) || (!err && hpb_en))
+ dev_info->hpb_enabled = true;
+ }
+
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
@@ -7760,6 +7780,10 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
+ if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
+ ufshpb_get_geo_info(hba, desc_buf);
+
out:
kfree(desc_buf);
return err;
@@ -7902,6 +7926,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
}
ufs_bsg_probe(hba);
+ ufshpb_init(hba);
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
@@ -7909,8 +7934,39 @@ out:
return ret;
}
+static void ufshcd_request_sense_done(struct request *rq, blk_status_t error)
+{
+ if (error != BLK_STS_OK)
+ pr_err("%s: REQUEST SENSE failed (%d)", __func__, error);
+ blk_put_request(rq);
+}
+
static int
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
+ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ /*
+ * From SPC-6: the REQUEST SENSE command with any allocation length
+ * clears the sense data.
+ */
+ static const u8 cmd[6] = {REQUEST_SENSE, 0, 0, 0, 0, 0};
+ struct scsi_request *rq;
+ struct request *req;
+
+ req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN, /*flags=*/0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ rq = scsi_req(req);
+ rq->cmd_len = ARRAY_SIZE(cmd);
+ memcpy(rq->cmd, cmd, rq->cmd_len);
+ rq->retries = 3;
+ req->timeout = 1 * HZ;
+ req->rq_flags |= RQF_PM | RQF_QUIET;
+
+ blk_execute_rq_nowait(/*bd_disk=*/NULL, req, /*at_head=*/true,
+ ufshcd_request_sense_done);
+ return 0;
+}
static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
{
@@ -7938,7 +7994,7 @@ static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
if (ret)
goto out_err;
- ret = ufshcd_send_request_sense(hba, sdp);
+ ret = ufshcd_request_sense_async(hba, sdp);
scsi_device_put(sdp);
out_err:
if (ret)
@@ -7967,13 +8023,13 @@ out:
}
/**
- * ufshcd_probe_hba - probe hba to detect device and initialize
+ * ufshcd_probe_hba - probe hba to detect device and initialize it
* @hba: per-adapter instance
- * @async: asynchronous execution or not
+ * @init_dev_params: whether or not to call ufshcd_device_params_init().
*
* Execute link-startup and verify device initialization
*/
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
int ret;
unsigned long flags;
@@ -8005,7 +8061,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
* Initialize UFS device parameters used by driver, these
* parameters are associated with UFS descriptors.
*/
- if (async) {
+ if (init_dev_params) {
ret = ufshcd_device_params_init(hba);
if (ret)
goto out;
@@ -8050,6 +8106,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ ufshpb_reset(hba);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret)
@@ -8097,6 +8154,10 @@ out:
static const struct attribute_group *ufshcd_driver_groups[] = {
&ufs_sysfs_unit_descriptor_group,
&ufs_sysfs_lun_attributes_group,
+#ifdef CONFIG_SCSI_UFS_HPB
+ &ufs_sysfs_hpb_stat_group,
+ &ufs_sysfs_hpb_param_group,
+#endif
NULL,
};
@@ -8521,8 +8582,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
- if (hba->eh_wq)
- destroy_workqueue(hba->eh_wq);
ufs_debugfs_hba_exit(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
@@ -8533,35 +8592,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
}
}
-static int
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
-{
- unsigned char cmd[6] = {REQUEST_SENSE,
- 0,
- 0,
- 0,
- UFS_SENSE_SIZE,
- 0};
- char *buffer;
- int ret;
-
- buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
- UFS_SENSE_SIZE, NULL, NULL,
- msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
- if (ret)
- pr_err("%s: failed with err %d\n", __func__, ret);
-
- kfree(buffer);
-out:
- return ret;
-}
-
/**
* ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
* power mode
@@ -8739,6 +8769,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
usleep_range(5000, 5100);
}
+#ifdef CONFIG_PM
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
int ret = 0;
@@ -8766,6 +8797,7 @@ vcc_disable:
out:
return ret;
}
+#endif /* CONFIG_PM */
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
@@ -8798,6 +8830,8 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
req_link_state = UIC_LINK_OFF_STATE;
}
+ ufshpb_suspend(hba);
+
/*
* If we can't transition into any of the low power modes
* just gate the clocks.
@@ -8921,6 +8955,7 @@ out:
ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
+ ufshpb_resume(hba);
}
hba->pm_op_in_progress = false;
return ret;
@@ -8999,6 +9034,8 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+
+ ufshpb_resume(hba);
goto out;
set_old_link_state:
@@ -9168,6 +9205,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
return ret;
}
+#ifdef CONFIG_PM
/**
* ufshcd_resume - helper function for resume operations
* @hba: per adapter instance
@@ -9205,17 +9243,21 @@ out:
ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
return ret;
}
+#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_SLEEP
/**
- * ufshcd_system_suspend - system suspend routine
- * @hba: per adapter instance
+ * ufshcd_system_suspend - system suspend callback
+ * @dev: Device associated with the UFS controller.
*
- * Check the description of ufshcd_suspend() function for more details.
+ * Executed before putting the system into a sleep state in which the contents
+ * of main memory are preserved.
*
* Returns 0 for success and non-zero for failure
*/
-int ufshcd_system_suspend(struct ufs_hba *hba)
+int ufshcd_system_suspend(struct device *dev)
{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
int ret = 0;
ktime_t start = ktime_get();
@@ -9232,16 +9274,19 @@ out:
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
- * ufshcd_system_resume - system resume routine
- * @hba: per adapter instance
+ * ufshcd_system_resume - system resume callback
+ * @dev: Device associated with the UFS controller.
+ *
+ * Executed after waking the system up from a sleep state in which the contents
+ * of main memory were preserved.
*
* Returns 0 for success and non-zero for failure
*/
-
-int ufshcd_system_resume(struct ufs_hba *hba)
+int ufshcd_system_resume(struct device *dev)
{
- int ret = 0;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start = ktime_get();
+ int ret = 0;
if (pm_runtime_suspended(hba->dev))
goto out;
@@ -9256,17 +9301,20 @@ out:
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
+#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_PM
/**
- * ufshcd_runtime_suspend - runtime suspend routine
- * @hba: per adapter instance
+ * ufshcd_runtime_suspend - runtime suspend callback
+ * @dev: Device associated with the UFS controller.
*
* Check the description of ufshcd_suspend() function for more details.
*
* Returns 0 for success and non-zero for failure
*/
-int ufshcd_runtime_suspend(struct ufs_hba *hba)
+int ufshcd_runtime_suspend(struct device *dev)
{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ktime_t start = ktime_get();
@@ -9281,7 +9329,7 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
* ufshcd_runtime_resume - runtime resume routine
- * @hba: per adapter instance
+ * @dev: Device associated with the UFS controller.
*
* This function basically brings controller
* to active state. Following operations are done in this function:
@@ -9289,8 +9337,9 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
* 1. Turn on all the controller related clocks
* 2. Turn ON VCC rail
*/
-int ufshcd_runtime_resume(struct ufs_hba *hba)
+int ufshcd_runtime_resume(struct device *dev)
{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ktime_t start = ktime_get();
@@ -9302,12 +9351,7 @@ int ufshcd_runtime_resume(struct ufs_hba *hba)
return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
-
-int ufshcd_runtime_idle(struct ufs_hba *hba)
-{
- return 0;
-}
-EXPORT_SYMBOL(ufshcd_runtime_idle);
+#endif /* CONFIG_PM */
/**
* ufshcd_shutdown - shutdown routine
@@ -9343,6 +9387,7 @@ void ufshcd_remove(struct ufs_hba *hba)
if (hba->sdev_ufs_device)
ufshcd_rpm_get_sync(hba);
ufs_bsg_remove(hba);
+ ufshpb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
@@ -9381,6 +9426,10 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
+static struct scsi_transport_template ufshcd_transport_template = {
+ .eh_strategy_handler = ufshcd_err_handler,
+};
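+
+/*
+ * Note: with ->eh_strategy_handler wired up, UFS error handling runs in the
+ * SCSI EH thread, which is what allows the driver-private eh_wq/eh_work
+ * (removed elsewhere in this patch) to go away.
+ */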
+
/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
@@ -9407,13 +9456,15 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+ host->transportt = &ufshcd_transport_template;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
- *hba_handle = hba;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
-
INIT_LIST_HEAD(&hba->clk_list_head);
+ spin_lock_init(&hba->outstanding_lock);
+
+ *hba_handle = hba;
out_error:
return err;
@@ -9444,7 +9495,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
- char eh_wq_name[sizeof("ufs_eh_wq_00")];
if (!mmio_base) {
dev_err(hba->dev,
@@ -9498,17 +9548,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
- /* Initialize work queues */
- snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
- hba->host->host_no);
- hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
- if (!hba->eh_wq) {
- dev_err(hba->dev, "%s: failed to create eh workqueue\n",
- __func__);
- err = -ENOMEM;
- goto out_disable;
- }
- INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
sema_init(&hba->host_sem, 1);
@@ -9627,6 +9666,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
+ device_enable_async_suspend(dev);
return 0;
free_tmf_queue:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 194755c9ddfe..52ea6f350b18 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -476,6 +476,27 @@ struct ufs_stats {
struct ufs_event_hist event[UFS_EVT_CNT];
};
+/**
+ * enum ufshcd_state - UFS host controller state
+ * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command
+ * processing.
+ * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
+ * SCSI commands.
+ * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
+ * SCSI commands may be submitted to the controller.
+ * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
+ * newly submitted SCSI commands with error code DID_BAD_TARGET.
+ * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
+ * failed. Fail all SCSI commands with error code DID_ERROR.
+ */
+enum ufshcd_state {
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
+ UFSHCD_STATE_EH_SCHEDULED_FATAL,
+ UFSHCD_STATE_ERROR,
+};
+
enum ufshcd_quirks {
/* Interrupt aggregation support is broken */
UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
@@ -641,6 +662,31 @@ struct ufs_hba_variant_params {
u32 wb_flush_threshold;
};
+#ifdef CONFIG_SCSI_UFS_HPB
+/**
+ * struct ufshpb_dev_info - UFSHPB device related info
+ * @num_lu: the number of user logical units; used to check whether all
+ * LUs have finished initialization
+ * @rgn_size: device-reported HPB region size
+ * @srgn_size: device-reported HPB sub-region size
+ * @slave_conf_cnt: counter used to check whether all LUs have finished
+ * initialization
+ * @hpb_disabled: flag indicating whether HPB is disabled
+ * @max_hpb_single_cmd: device-reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
+ * @is_legacy: flag indicating a legacy device (HPB 1.0)
+ * @control_mode: either host or device control mode
+ */
+struct ufshpb_dev_info {
+ int num_lu;
+ int rgn_size;
+ int srgn_size;
+ atomic_t slave_conf_cnt;
+ bool hpb_disabled;
+ u8 max_hpb_single_cmd;
+ bool is_legacy;
+ u8 control_mode;
+};
+#endif
+
struct ufs_hba_monitor {
unsigned long chunk_size;
@@ -674,6 +720,7 @@ struct ufs_hba_monitor {
* @lrb: local reference block
* @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_lock: Protects @outstanding_reqs.
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
* @nutrs: Transfer Request Queue depth supported by controller
@@ -683,19 +730,17 @@ struct ufs_hba_monitor {
* @priv: pointer to variant specific private data
* @irq: Irq number of the controller
* @active_uic_cmd: handle of active UIC command
- * @uic_cmd_mutex: mutex for uic command
+ * @uic_cmd_mutex: mutex for UIC command
* @tmf_tag_set: TMF tag set.
* @tmf_queue: Used to allocate TMF tags.
* @pwr_done: completion for power mode change
- * @ufshcd_state: UFSHCD states
+ * @ufshcd_state: UFSHCD state
* @eh_flags: Error handling flags
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
* @shutting_down: flag to check if shutdown has been invoked
* @host_sem: semaphore used to serialize concurrent contexts
- * @eh_wq: Workqueue that eh_work works on
- * @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
* @uic_error: UFS interconnect layer error status
@@ -760,6 +805,7 @@ struct ufs_hba {
struct ufshcd_lrb *lrb;
unsigned long outstanding_tasks;
+ spinlock_t outstanding_lock;
unsigned long outstanding_reqs;
u32 capabilities;
@@ -785,7 +831,7 @@ struct ufs_hba {
struct mutex uic_cmd_mutex;
struct completion *uic_async_done;
- u32 ufshcd_state;
+ enum ufshcd_state ufshcd_state;
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask; /* Exception event mask */
@@ -797,8 +843,6 @@ struct ufs_hba {
struct semaphore host_sem;
/* Work Queues */
- struct workqueue_struct *eh_wq;
- struct work_struct eh_work;
struct work_struct eeh_work;
/* HBA Errors */
@@ -851,6 +895,10 @@ struct ufs_hba {
struct request_queue *bsg_queue;
struct delayed_work rpm_dev_flush_recheck_work;
+#ifdef CONFIG_SCSI_UFS_HPB
+ struct ufshpb_dev_info ufshpb_dev;
+#endif
+
struct ufs_hba_monitor monitor;
#ifdef CONFIG_SCSI_UFS_CRYPTO
@@ -893,16 +941,8 @@ static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
-/* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/
-#ifndef CONFIG_SCSI_UFS_DWC
- if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
- !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
- return true;
- else
- return false;
-#else
-return true;
-#endif
+ return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
}
static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
@@ -1009,11 +1049,14 @@ static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
return 0;
}
-extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
-extern int ufshcd_runtime_resume(struct ufs_hba *hba);
-extern int ufshcd_runtime_idle(struct ufs_hba *hba);
-extern int ufshcd_system_suspend(struct ufs_hba *hba);
-extern int ufshcd_system_resume(struct ufs_hba *hba);
+#ifdef CONFIG_PM
+extern int ufshcd_runtime_suspend(struct device *dev);
+extern int ufshcd_runtime_resume(struct device *dev);
+#endif
+#ifdef CONFIG_PM_SLEEP
+extern int ufshcd_system_suspend(struct device *dev);
+extern int ufshcd_system_resume(struct device *dev);
+#endif
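+/*
+ * Minimal sketch of how a glue driver is expected to wire these callbacks
+ * into its dev_pm_ops now that they take a struct device (the ops name
+ * below is hypothetical, not part of this patch):
+ *
+ *	static const struct dev_pm_ops ufs_example_pm_ops = {
+ *		SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend,
+ *					ufshcd_system_resume)
+ *		SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend,
+ *				   ufshcd_runtime_resume, NULL)
+ *	};
+ */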
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
@@ -1096,6 +1139,9 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
u8 param_offset,
u8 *param_read_buf,
u8 param_size);
+int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector,
+ u32 *attr_val);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
@@ -1160,11 +1206,6 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
-static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
-{
- return (hba->ufs_version >= ufshci_version(3, 0));
-}
-
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
bool up, enum ufs_notify_change_status status)
{
@@ -1226,18 +1267,6 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
return -ENOTSUPP;
}
-static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
- bool is_scsi_cmd)
-{
- if (hba->vops && hba->vops->setup_xfer_req) {
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
-}
-
static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
int tag, u8 tm_function)
{
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 5affb1fce5ad..de95be5d11d4 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -39,7 +39,6 @@ enum {
REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
- REG_UTP_TRANSFER_REQ_LIST_COMPL = 0x64,
REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
new file mode 100644
index 000000000000..9acce92a356b
--- /dev/null
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -0,0 +1,2933 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Yongmyung Lee <ymhungry.lee@samsung.com>
+ * Jinyoung Choi <j-young.choi@samsung.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/async.h>
+
+#include "ufshcd.h"
+#include "ufshpb.h"
+#include "../sd.h"
+
+#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
+#define READ_TO_MS 1000
+#define READ_TO_EXPIRIES 100
+#define POLLING_INTERVAL_MS 200
+#define THROTTLE_MAP_REQ_DEFAULT 1
+
+/* memory management */
+static struct kmem_cache *ufshpb_mctx_cache;
+static mempool_t *ufshpb_mctx_pool;
+static mempool_t *ufshpb_page_pool;
+/* A cache size of 2MB can hold ppn entries covering a 1GB LBA range. */
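+/*
+ * (Assuming 8-byte HPB entries and 4KB blocks: 2MB / 8B = 256K entries,
+ * and 256K entries * 4KB = 1GB of mapped LBA space.)
+ */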
+static unsigned int ufshpb_host_map_kbytes = 2048;
+static int tot_active_srgn_pages;
+
+static struct workqueue_struct *ufshpb_wq;
+
+static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
+ int srgn_idx);
+
+bool ufshpb_is_allowed(struct ufs_hba *hba)
+{
+ return !(hba->ufshpb_dev.hpb_disabled);
+}
+
+/* HPB version 1.0 is referred to as the legacy version. */
+bool ufshpb_is_legacy(struct ufs_hba *hba)
+{
+ return hba->ufshpb_dev.is_legacy;
+}
+
+static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
+{
+ return sdev->hostdata;
+}
+
+static int ufshpb_get_state(struct ufshpb_lu *hpb)
+{
+ return atomic_read(&hpb->hpb_state);
+}
+
+static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
+{
+ atomic_set(&hpb->hpb_state, state);
+}
+
+static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
+ struct ufshpb_subregion *srgn)
+{
+ return rgn->rgn_state != HPB_RGN_INACTIVE &&
+ srgn->srgn_state == HPB_SRGN_VALID;
+}
+
+static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
+{
+ return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
+}
+
+static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
+{
+ return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
+ op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
+}
+
+static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
+{
+ return transfer_len <= hpb->pre_req_max_tr_len;
+}
+
+/*
+ * By default, the WRITE_BUFFER command supports transfer lengths from
+ * 36KB (len=9) to 1MB (len=256). The transfer_len range can be changed
+ * through sysfs.
+ */
+static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
+{
+ return len > hpb->pre_req_min_tr_len &&
+ len <= hpb->pre_req_max_tr_len;
+}
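+
+/*
+ * For example, with the default range above a 512KB read (len=128) requires
+ * a WRITE_BUFFER pre-request, while a 16KB read (len=4) does not.
+ */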
+
+static bool ufshpb_is_general_lun(int lun)
+{
+ return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
+static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
+{
+ if (hpb->lu_pinned_end != PINNED_NOT_SET &&
+ rgn_idx >= hpb->lu_pinned_start &&
+ rgn_idx <= hpb->lu_pinned_end)
+ return true;
+
+ return false;
+}
+
+static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
+{
+ bool ret = false;
+ unsigned long flags;
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT)
+ return;
+
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
+ ret = true;
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+ if (ret)
+ queue_work(ufshpb_wq, &hpb->map_work);
+}
+
+static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp,
+ struct utp_hpb_rsp *rsp_field)
+{
+ /* Check HPB_UPDATE_ALERT */
+ if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
+ UPIU_HEADER_DWORD(0, 2, 0, 0)))
+ return false;
+
+ if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
+ rsp_field->desc_type != DEV_DES_TYPE ||
+ rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
+ rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
+ rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
+ rsp_field->hpb_op == HPB_RSP_NONE ||
+ (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
+ !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
+ return false;
+
+ if (!ufshpb_is_general_lun(rsp_field->lun)) {
+ dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
+ lrbp->lun);
+ return false;
+ }
+
+ return true;
+}
+
+static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
+ int srgn_offset, int cnt, bool set_dirty)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn, *prev_srgn = NULL;
+ int set_bit_len;
+ int bitmap_len;
+ unsigned long flags;
+
+next_srgn:
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ if (likely(!srgn->is_last))
+ bitmap_len = hpb->entries_per_srgn;
+ else
+ bitmap_len = hpb->last_srgn_entries;
+
+ if ((srgn_offset + cnt) > bitmap_len)
+ set_bit_len = bitmap_len - srgn_offset;
+ else
+ set_bit_len = cnt;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+ if (set_dirty) {
+ if (srgn->srgn_state == HPB_SRGN_VALID)
+ bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
+ set_bit_len);
+ } else if (hpb->is_hcm) {
+ /* rewind the read timer for lru regions */
+ rgn->read_timeout = ktime_add_ms(ktime_get(),
+ rgn->hpb->params.read_timeout_ms);
+ rgn->read_timeout_expiries =
+ rgn->hpb->params.read_timeout_expiries;
+ }
+ }
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ if (hpb->is_hcm && prev_srgn != srgn) {
+ bool activate = false;
+
+ spin_lock(&rgn->rgn_lock);
+ if (set_dirty) {
+ rgn->reads -= srgn->reads;
+ srgn->reads = 0;
+ set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+ } else {
+ srgn->reads++;
+ rgn->reads++;
+ if (srgn->reads == hpb->params.activation_thld)
+ activate = true;
+ }
+ spin_unlock(&rgn->rgn_lock);
+
+ if (activate ||
+ test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+ "activate region %d-%d\n", rgn_idx, srgn_idx);
+ }
+
+ prev_srgn = srgn;
+ }
+
+ srgn_offset = 0;
+ if (++srgn_idx == hpb->srgns_per_rgn) {
+ srgn_idx = 0;
+ rgn_idx++;
+ }
+
+ cnt -= set_bit_len;
+ if (cnt > 0)
+ goto next_srgn;
+}
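+
+/*
+ * Worked example (hypothetical geometry with entries_per_srgn = 512): a
+ * request with srgn_offset = 500 and cnt = 30 spans two subregions; the
+ * first pass handles set_bit_len = 12 entries, then the loop advances to
+ * the next subregion with srgn_offset = 0 and cnt = 18.
+ */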
+
+static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
+ int srgn_idx, int srgn_offset, int cnt)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ int bitmap_len;
+ int bit_len;
+
+next_srgn:
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ if (likely(!srgn->is_last))
+ bitmap_len = hpb->entries_per_srgn;
+ else
+ bitmap_len = hpb->last_srgn_entries;
+
+ if (!ufshpb_is_valid_srgn(rgn, srgn))
+ return true;
+
+ /*
+ * If the region state is active, mctx must have been allocated. If
+ * mctx is missing here, either the region was evicted or the mctx
+ * allocation failed.
+ */
+ if (unlikely(!srgn->mctx)) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "no mctx in region %d subregion %d.\n",
+ srgn->rgn_idx, srgn->srgn_idx);
+ return true;
+ }
+
+ if ((srgn_offset + cnt) > bitmap_len)
+ bit_len = bitmap_len - srgn_offset;
+ else
+ bit_len = cnt;
+
+ if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
+ srgn_offset) < bit_len + srgn_offset)
+ return true;
+
+ srgn_offset = 0;
+ if (++srgn_idx == hpb->srgns_per_rgn) {
+ srgn_idx = 0;
+ rgn_idx++;
+ }
+
+ cnt -= bit_len;
+ if (cnt > 0)
+ goto next_srgn;
+
+ return false;
+}
+
+static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
+{
+ return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+}
+
+static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
+ struct ufshpb_map_ctx *mctx, int pos,
+ int len, __be64 *ppn_buf)
+{
+ struct page *page;
+ int index, offset;
+ int copied;
+
+ index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
+ offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
+
+ if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
+ copied = len;
+ else
+ copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
+
+ page = mctx->m_page[index];
+ if (unlikely(!page)) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "error. cannot find page in mctx\n");
+ return -ENOMEM;
+ }
+
+ memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
+ copied * HPB_ENTRY_SIZE);
+
+ return copied;
+}
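+
+/*
+ * Worked example (assuming PAGE_SIZE = 4096 and HPB_ENTRY_SIZE = 8, i.e.
+ * 512 entries per page): pos = 1000 selects m_page[1] at offset 488, and
+ * len = 100 is truncated to copied = 24, the entries left in that page.
+ */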
+
+static void
+ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
+ int *srgn_idx, int *offset)
+{
+ int rgn_offset;
+
+ *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
+ rgn_offset = lpn & hpb->entries_per_rgn_mask;
+ *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
+ *offset = rgn_offset & hpb->entries_per_srgn_mask;
+}
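+
+/*
+ * Example, assuming a hypothetical geometry with entries_per_rgn_shift = 12
+ * and entries_per_srgn_shift = 9: lpn 0x5432 yields rgn_idx 5 (0x5432 >> 12),
+ * srgn_idx 2 (0x432 >> 9) and offset 0x32 (0x432 & 0x1ff).
+ */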
+
+static void
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
+ struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
+ u8 transfer_len, int read_id)
+{
+ unsigned char *cdb = lrbp->cmd->cmnd;
+ __be64 ppn_tmp = ppn;
+
+ cdb[0] = UFSHPB_READ;
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
+ ppn_tmp = swab64(ppn);
+
+ /* ppn value is stored as big-endian in the host memory */
+ memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
+ cdb[14] = transfer_len;
+ cdb[15] = read_id;
+
+ lrbp->cmd->cmd_len = UFS_CDB_SIZE;
+}
+
+static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
+ unsigned long lpn, unsigned int len,
+ int read_id)
+{
+ cdb[0] = UFSHPB_WRITE_BUFFER;
+ cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
+
+ put_unaligned_be32(lpn, &cdb[2]);
+ cdb[6] = read_id;
+ put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
+
+ cdb[9] = 0x00; /* Control = 0x00 */
+}
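+
+/*
+ * Example CDB contents, assuming HPB_ENTRY_SIZE = 8: lpn = 0x1000, len = 2
+ * and read_id = 7 yield cdb[2..5] = 00 00 10 00 (LPN), cdb[6] = 0x07 and
+ * cdb[7..8] = 00 10 (16 bytes of L2P entries to transfer).
+ */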
+
+static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_req *pre_req;
+
+ if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
+ dev_info(&hpb->sdev_ufs_lu->sdev_dev,
+ "pre_req throttle. inflight %d throttle %d",
+ hpb->num_inflight_pre_req, hpb->throttle_pre_req);
+ return NULL;
+ }
+
+ pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
+ struct ufshpb_req, list_req);
+ if (!pre_req) {
+ dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
+ return NULL;
+ }
+
+ list_del_init(&pre_req->list_req);
+ hpb->num_inflight_pre_req++;
+
+ return pre_req;
+}
+
+static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
+ struct ufshpb_req *pre_req)
+{
+ pre_req->req = NULL;
+ bio_reset(pre_req->bio);
+ list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
+ hpb->num_inflight_pre_req--;
+}
+
+static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
+{
+ struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
+ struct ufshpb_lu *hpb = pre_req->hpb;
+ unsigned long flags;
+
+ if (error) {
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_sense_hdr sshdr;
+
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
+ scsi_command_normalize_sense(cmd, &sshdr);
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "code %x sense_key %x asc %x ascq %x",
+ sshdr.response_code,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "byte4 %x byte5 %x byte6 %x additional_len %x",
+ sshdr.byte4, sshdr.byte5,
+ sshdr.byte6, sshdr.additional_length);
+ }
+
+ blk_mq_free_request(req);
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ ufshpb_put_pre_req(pre_req->hpb, pre_req);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+}
+
+static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
+{
+ struct ufshpb_lu *hpb = pre_req->hpb;
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ __be64 *addr;
+ int offset = 0;
+ int copied;
+ unsigned long lpn = pre_req->wb.lpn;
+ int rgn_idx, srgn_idx, srgn_offset;
+ unsigned long flags;
+
+ addr = page_address(page);
+ ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+next_offset:
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ if (!ufshpb_is_valid_srgn(rgn, srgn))
+ goto mctx_error;
+
+ if (!srgn->mctx)
+ goto mctx_error;
+
+ copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
+ pre_req->wb.len - offset,
+ &addr[offset]);
+
+ if (copied < 0)
+ goto mctx_error;
+
+ offset += copied;
+ srgn_offset += copied;
+
+ if (srgn_offset == hpb->entries_per_srgn) {
+ srgn_offset = 0;
+
+ if (++srgn_idx == hpb->srgns_per_rgn) {
+ srgn_idx = 0;
+ rgn_idx++;
+ }
+ }
+
+ if (offset < pre_req->wb.len)
+ goto next_offset;
+
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return 0;
+mctx_error:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return -ENOMEM;
+}
+
+static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
+ struct request_queue *q,
+ struct ufshpb_req *pre_req)
+{
+ struct page *page = pre_req->wb.m_page;
+ struct bio *bio = pre_req->bio;
+ int entries_bytes, ret;
+
+ if (!page)
+ return -ENOMEM;
+
+ if (ufshpb_prep_entry(pre_req, page))
+ return -ENOMEM;
+
+ entries_bytes = pre_req->wb.len * sizeof(__be64);
+
+ ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
+ if (ret != entries_bytes) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "bio_add_pc_page fail: %d", ret);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
+{
+ if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
+ hpb->cur_read_id = 1;
+ return hpb->cur_read_id;
+}
+
+static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
+ struct ufshpb_req *pre_req, int read_id)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct request_queue *q = sdev->request_queue;
+ struct request *req;
+ struct scsi_request *rq;
+ struct bio *bio = pre_req->bio;
+
+ pre_req->hpb = hpb;
+ pre_req->wb.lpn = sectors_to_logical(cmd->device,
+ blk_rq_pos(scsi_cmd_to_rq(cmd)));
+ pre_req->wb.len = sectors_to_logical(cmd->device,
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)));
+ if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
+ return -ENOMEM;
+
+ req = pre_req->req;
+
+ /* 1. request setup */
+ blk_rq_append_bio(req, bio);
+ req->rq_disk = NULL;
+ req->end_io_data = (void *)pre_req;
+ req->end_io = ufshpb_pre_req_compl_fn;
+
+ /* 2. scsi_request setup */
+ rq = scsi_req(req);
+ rq->retries = 1;
+
+ ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
+ read_id);
+ rq->cmd_len = scsi_command_size(rq->cmd);
+
+ if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
+ return -EAGAIN;
+
+ hpb->stats.pre_req_cnt++;
+
+ return 0;
+}
+
+static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
+ int *read_id)
+{
+ struct ufshpb_req *pre_req;
+ struct request *req = NULL;
+ unsigned long flags;
+ int _read_id;
+ int ret = 0;
+
+ req = blk_get_request(cmd->device->request_queue,
+ REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(req))
+ return -EAGAIN;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ pre_req = ufshpb_get_pre_req(hpb);
+ if (!pre_req) {
+ ret = -EAGAIN;
+ goto unlock_out;
+ }
+ _read_id = ufshpb_get_read_id(hpb);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ pre_req->req = req;
+
+ ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
+ if (ret)
+ goto free_pre_req;
+
+ *read_id = _read_id;
+
+ return ret;
+free_pre_req:
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ ufshpb_put_pre_req(hpb, pre_req);
+unlock_out:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ blk_put_request(req);
+ return ret;
+}
+
+/*
+ * This function sets up an HPB READ command using host-side L2P map data.
+ */
+int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufshpb_lu *hpb;
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ u32 lpn;
+ __be64 ppn;
+ unsigned long flags;
+ int transfer_len, rgn_idx, srgn_idx, srgn_offset;
+ int read_id = 0;
+ int err = 0;
+
+ hpb = ufshpb_get_hpb_data(cmd->device);
+ if (!hpb)
+ return -ENODEV;
+
+ if (ufshpb_get_state(hpb) == HPB_INIT)
+ return -ENODEV;
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: ufshpb state is not PRESENT", __func__);
+ return -ENODEV;
+ }
+
+ if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
+ (!ufshpb_is_write_or_discard(cmd) &&
+ !ufshpb_is_read_cmd(cmd)))
+ return 0;
+
+ transfer_len = sectors_to_logical(cmd->device,
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)));
+ if (unlikely(!transfer_len))
+ return 0;
+
+ lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
+ ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ /* If the command is WRITE or DISCARD, mark the bitmap dirty */
+ if (ufshpb_is_write_or_discard(cmd)) {
+ ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len, true);
+ return 0;
+ }
+
+ if (!ufshpb_is_supported_chunk(hpb, transfer_len))
+ return 0;
+
+ WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
+
+ if (hpb->is_hcm) {
+ /*
+ * In host control mode, reads are the main source of
+ * activation trials.
+ */
+ ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len, false);
+
+ /* keep those counters normalized */
+ if (rgn->reads > hpb->entries_per_srgn)
+ schedule_work(&hpb->ufshpb_normalization_work);
+ }
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len)) {
+ hpb->stats.miss_cnt++;
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return 0;
+ }
+
+ err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ if (unlikely(err < 0)) {
+ /*
+ * In this case the region state is active but the ppn table has
+ * not been allocated. The ppn table must always be allocated
+ * while a region is in the active state.
+ */
+ dev_err(hba->dev, "get ppn failed. err %d\n", err);
+ return err;
+ }
+ if (!ufshpb_is_legacy(hba) &&
+ ufshpb_is_required_wb(hpb, transfer_len)) {
+ err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
+ if (err) {
+ unsigned long timeout;
+
+ timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
+ hpb->params.requeue_timeout_ms);
+
+ if (time_before(jiffies, timeout))
+ return -EAGAIN;
+
+ hpb->stats.miss_cnt++;
+ return 0;
+ }
+ }
+
+ ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
+ read_id);
+
+ hpb->stats.hit_cnt++;
+ return 0;
+}
+
+static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
+ int rgn_idx, enum req_opf dir,
+ bool atomic)
+{
+ struct ufshpb_req *rq;
+ struct request *req;
+ int retries = HPB_MAP_REQ_RETRIES;
+
+ rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
+ if (!rq)
+ return NULL;
+
+retry:
+ req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
+ BLK_MQ_REQ_NOWAIT);
+
+ if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
+ usleep_range(3000, 3100);
+ goto retry;
+ }
+
+ if (IS_ERR(req))
+ goto free_rq;
+
+ rq->hpb = hpb;
+ rq->req = req;
+ rq->rb.rgn_idx = rgn_idx;
+
+ return rq;
+
+free_rq:
+ kmem_cache_free(hpb->map_req_cache, rq);
+ return NULL;
+}
+
+static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
+{
+ blk_put_request(rq->req);
+ kmem_cache_free(hpb->map_req_cache, rq);
+}
+
+static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
+ struct ufshpb_subregion *srgn)
+{
+ struct ufshpb_req *map_req;
+ struct bio *bio;
+ unsigned long flags;
+
+ if (hpb->is_hcm &&
+ hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
+ dev_info(&hpb->sdev_ufs_lu->sdev_dev,
+ "map_req throttle. inflight %d throttle %d",
+ hpb->num_inflight_map_req,
+ hpb->params.inflight_map_req);
+ return NULL;
+ }
+
+ map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
+ if (!map_req)
+ return NULL;
+
+ bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
+ if (!bio) {
+ ufshpb_put_req(hpb, map_req);
+ return NULL;
+ }
+
+ map_req->bio = bio;
+
+ map_req->rb.srgn_idx = srgn->srgn_idx;
+ map_req->rb.mctx = srgn->mctx;
+
+ spin_lock_irqsave(&hpb->param_lock, flags);
+ hpb->num_inflight_map_req++;
+ spin_unlock_irqrestore(&hpb->param_lock, flags);
+
+ return map_req;
+}
+
+static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
+ struct ufshpb_req *map_req)
+{
+ unsigned long flags;
+
+ bio_put(map_req->bio);
+ ufshpb_put_req(hpb, map_req);
+
+ spin_lock_irqsave(&hpb->param_lock, flags);
+ hpb->num_inflight_map_req--;
+ spin_unlock_irqrestore(&hpb->param_lock, flags);
+}
+
+static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
+ struct ufshpb_subregion *srgn)
+{
+ struct ufshpb_region *rgn;
+ u32 num_entries = hpb->entries_per_srgn;
+
+ if (!srgn->mctx) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "no mctx in region %d subregion %d.\n",
+ srgn->rgn_idx, srgn->srgn_idx);
+ return -1;
+ }
+
+ if (unlikely(srgn->is_last))
+ num_entries = hpb->last_srgn_entries;
+
+ bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
+
+ rgn = hpb->rgn_tbl + srgn->rgn_idx;
+ clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+
+ return 0;
+}
+
+static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
+ int srgn_idx)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ list_del_init(&rgn->list_inact_rgn);
+
+ if (list_empty(&srgn->list_act_srgn))
+ list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+
+ hpb->stats.rb_active_cnt++;
+}
+
+static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ int srgn_idx;
+
+ rgn = hpb->rgn_tbl + rgn_idx;
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ list_del_init(&srgn->list_act_srgn);
+
+ if (list_empty(&rgn->list_inact_rgn))
+ list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
+
+ hpb->stats.rb_inactive_cnt++;
+}
+
+static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
+ struct ufshpb_subregion *srgn)
+{
+ struct ufshpb_region *rgn;
+
+ /*
+ * If the subregion has no mctx after the HPB_READ_BUFFER I/O has
+ * completed, the region to which the subregion belongs was evicted.
+ * A region must not be evicted while its I/O is in progress.
+ */
+ if (!srgn->mctx) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "no mctx in region %d subregion %d.\n",
+ srgn->rgn_idx, srgn->srgn_idx);
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ return;
+ }
+
+ rgn = hpb->rgn_tbl + srgn->rgn_idx;
+
+ if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "region %d subregion %d evicted\n",
+ srgn->rgn_idx, srgn->srgn_idx);
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ return;
+ }
+ srgn->srgn_state = HPB_SRGN_VALID;
+}
+
+static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
+{
+ struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
+
+ ufshpb_put_req(umap_req->hpb, umap_req);
+}
+
+static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
+{
+ struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
+ struct ufshpb_lu *hpb = map_req->hpb;
+ struct ufshpb_subregion *srgn;
+ unsigned long flags;
+
+ srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
+ map_req->rb.srgn_idx;
+
+ ufshpb_clear_dirty_bitmap(hpb, srgn);
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ ufshpb_activate_subregion(hpb, srgn);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ ufshpb_put_map_req(map_req->hpb, map_req);
+}
+
+static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
+{
+ cdb[0] = UFSHPB_WRITE_BUFFER;
+ cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
+ UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
+ if (rgn)
+ put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
+ cdb[9] = 0x00;
+}
+
+static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
+ int srgn_idx, int srgn_mem_size)
+{
+ cdb[0] = UFSHPB_READ_BUFFER;
+ cdb[1] = UFSHPB_READ_BUFFER_ID;
+
+ put_unaligned_be16(rgn_idx, &cdb[2]);
+ put_unaligned_be16(srgn_idx, &cdb[4]);
+ put_unaligned_be24(srgn_mem_size, &cdb[6]);
+
+ cdb[9] = 0x00;
+}
+
+static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
+ struct ufshpb_req *umap_req,
+ struct ufshpb_region *rgn)
+{
+ struct request *req;
+ struct scsi_request *rq;
+
+ req = umap_req->req;
+ req->timeout = 0;
+ req->end_io_data = (void *)umap_req;
+ rq = scsi_req(req);
+ ufshpb_set_unmap_cmd(rq->cmd, rgn);
+ rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
+
+ blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
+
+ hpb->stats.umap_req_cnt++;
+}
+
+static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
+ struct ufshpb_req *map_req, bool last)
+{
+ struct request_queue *q;
+ struct request *req;
+ struct scsi_request *rq;
+ int mem_size = hpb->srgn_mem_size;
+ int ret = 0;
+ int i;
+
+ q = hpb->sdev_ufs_lu->request_queue;
+ for (i = 0; i < hpb->pages_per_srgn; i++) {
+ ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
+ PAGE_SIZE, 0);
+ if (ret != PAGE_SIZE) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "bio_add_pc_page fail %d - %d\n",
+ map_req->rb.rgn_idx, map_req->rb.srgn_idx);
+ return ret;
+ }
+ }
+
+ req = map_req->req;
+
+ blk_rq_append_bio(req, map_req->bio);
+
+ req->end_io_data = map_req;
+
+ rq = scsi_req(req);
+
+ if (unlikely(last))
+ mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
+
+ ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
+ map_req->rb.srgn_idx, mem_size);
+ rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
+
+ blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
+
+ hpb->stats.map_req_cnt++;
+ return 0;
+}
+
+static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
+ bool last)
+{
+ struct ufshpb_map_ctx *mctx;
+ u32 num_entries = hpb->entries_per_srgn;
+ int i, j;
+
+ mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
+ if (!mctx)
+ return NULL;
+
+ mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
+ if (!mctx->m_page)
+ goto release_mctx;
+
+ if (unlikely(last))
+ num_entries = hpb->last_srgn_entries;
+
+ mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
+ if (!mctx->ppn_dirty)
+ goto release_m_page;
+
+ for (i = 0; i < hpb->pages_per_srgn; i++) {
+ mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
+ if (!mctx->m_page[i]) {
+ for (j = 0; j < i; j++)
+ mempool_free(mctx->m_page[j], ufshpb_page_pool);
+ goto release_ppn_dirty;
+ }
+ clear_page(page_address(mctx->m_page[i]));
+ }
+
+ return mctx;
+
+release_ppn_dirty:
+ bitmap_free(mctx->ppn_dirty);
+release_m_page:
+ kmem_cache_free(hpb->m_page_cache, mctx->m_page);
+release_mctx:
+ mempool_free(mctx, ufshpb_mctx_pool);
+ return NULL;
+}
+
+static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
+ struct ufshpb_map_ctx *mctx)
+{
+ int i;
+
+ for (i = 0; i < hpb->pages_per_srgn; i++)
+ mempool_free(mctx->m_page[i], ufshpb_page_pool);
+
+ bitmap_free(mctx->ppn_dirty);
+ kmem_cache_free(hpb->m_page_cache, mctx->m_page);
+ mempool_free(mctx, ufshpb_mctx_pool);
+}
+
+static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ struct ufshpb_subregion *srgn;
+ int srgn_idx;
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ if (srgn->srgn_state == HPB_SRGN_ISSUED)
+ return -EPERM;
+
+ return 0;
+}
+
+static void ufshpb_read_to_handler(struct work_struct *work)
+{
+ struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
+ ufshpb_read_to_work.work);
+ struct victim_select_info *lru_info = &hpb->lru_info;
+ struct ufshpb_region *rgn, *next_rgn;
+ unsigned long flags;
+ unsigned int poll;
+ LIST_HEAD(expired_list);
+
+ if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
+ return;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+ list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
+ list_lru_rgn) {
+ bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
+
+ if (timedout) {
+ rgn->read_timeout_expiries--;
+ if (is_rgn_dirty(rgn) ||
+ rgn->read_timeout_expiries == 0)
+ list_add(&rgn->list_expired_rgn, &expired_list);
+ else
+ rgn->read_timeout = ktime_add_ms(ktime_get(),
+ hpb->params.read_timeout_ms);
+ }
+ }
+
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ list_for_each_entry_safe(rgn, next_rgn, &expired_list,
+ list_expired_rgn) {
+ list_del_init(&rgn->list_expired_rgn);
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ }
+
+ ufshpb_kick_map_work(hpb);
+
+ clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
+
+ poll = hpb->params.timeout_polling_interval_ms;
+ schedule_delayed_work(&hpb->ufshpb_read_to_work,
+ msecs_to_jiffies(poll));
+}
+
+static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
+ struct ufshpb_region *rgn)
+{
+ rgn->rgn_state = HPB_RGN_ACTIVE;
+ list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
+ atomic_inc(&lru_info->active_cnt);
+ if (rgn->hpb->is_hcm) {
+ rgn->read_timeout =
+ ktime_add_ms(ktime_get(),
+ rgn->hpb->params.read_timeout_ms);
+ rgn->read_timeout_expiries =
+ rgn->hpb->params.read_timeout_expiries;
+ }
+}
+
+static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
+ struct ufshpb_region *rgn)
+{
+ list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
+}
+
+static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
+{
+ struct victim_select_info *lru_info = &hpb->lru_info;
+ struct ufshpb_region *rgn, *victim_rgn = NULL;
+
+ list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
+ if (!rgn) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: no region allocated\n",
+ __func__);
+ return NULL;
+ }
+ if (ufshpb_check_srgns_issue_state(hpb, rgn))
+ continue;
+
+ /*
+ * In host control mode, only evict a region whose read count
+ * has dropped below the eviction-exit threshold.
+ */
+ if (hpb->is_hcm &&
+ rgn->reads > hpb->params.eviction_thld_exit)
+ continue;
+
+ victim_rgn = rgn;
+ break;
+ }
+
+ return victim_rgn;
+}
+
+static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
+ struct ufshpb_region *rgn)
+{
+ list_del_init(&rgn->list_lru_rgn);
+ rgn->rgn_state = HPB_RGN_INACTIVE;
+ atomic_dec(&lru_info->active_cnt);
+}
+
+static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
+ struct ufshpb_subregion *srgn)
+{
+ if (srgn->srgn_state != HPB_SRGN_UNUSED) {
+ ufshpb_put_map_ctx(hpb, srgn->mctx);
+ srgn->srgn_state = HPB_SRGN_UNUSED;
+ srgn->mctx = NULL;
+ }
+}
+
+static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn,
+ bool atomic)
+{
+ struct ufshpb_req *umap_req;
+ int rgn_idx = rgn ? rgn->rgn_idx : 0;
+
+ umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
+ if (!umap_req)
+ return -ENOMEM;
+
+ ufshpb_execute_umap_req(hpb, umap_req, rgn);
+
+ return 0;
+}
+
+static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ return ufshpb_issue_umap_req(hpb, rgn, true);
+}
+
+static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
+{
+ return ufshpb_issue_umap_req(hpb, NULL, false);
+}
+
+static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ struct victim_select_info *lru_info;
+ struct ufshpb_subregion *srgn;
+ int srgn_idx;
+
+ lru_info = &hpb->lru_info;
+
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
+
+ ufshpb_cleanup_lru_info(lru_info, rgn);
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ ufshpb_purge_active_subregion(hpb, srgn);
+}
+
+static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (rgn->rgn_state == HPB_RGN_PINNED) {
+ dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+ "pinned region cannot drop-out. region %d\n",
+ rgn->rgn_idx);
+ goto out;
+ }
+
+ if (!list_empty(&rgn->list_lru_rgn)) {
+ if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (hpb->is_hcm) {
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ ret = ufshpb_issue_umap_single_req(hpb, rgn);
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (ret)
+ goto out;
+ }
+
+ __ufshpb_evict_region(hpb, rgn);
+ }
+out:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return ret;
+}
+
+static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn,
+ struct ufshpb_subregion *srgn)
+{
+ struct ufshpb_req *map_req;
+ unsigned long flags;
+ int ret;
+ int err = -EAGAIN;
+ bool alloc_required = false;
+ enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: ufshpb state is not PRESENT\n", __func__);
+ goto unlock_out;
+ }
+
+ if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
+ (srgn->srgn_state == HPB_SRGN_INVALID)) {
+ err = 0;
+ goto unlock_out;
+ }
+
+ if (srgn->srgn_state == HPB_SRGN_UNUSED)
+ alloc_required = true;
+
+ /*
+ * If the subregion is already in the ISSUED state, a device-side
+ * event (e.g. GC or wear-leveling) occurred and an HPB response
+ * requesting a map load was received. In that case, once the
+ * current HPB_READ_BUFFER finishes, the next HPB_READ_BUFFER is
+ * issued again to obtain the latest map data.
+ */
+ if (srgn->srgn_state == HPB_SRGN_ISSUED)
+ goto unlock_out;
+
+ srgn->srgn_state = HPB_SRGN_ISSUED;
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ if (alloc_required) {
+ srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
+ if (!srgn->mctx) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "get map_ctx failed. region %d - %d\n",
+ rgn->rgn_idx, srgn->srgn_idx);
+ state = HPB_SRGN_UNUSED;
+ goto change_srgn_state;
+ }
+ }
+
+ map_req = ufshpb_get_map_req(hpb, srgn);
+ if (!map_req)
+ goto change_srgn_state;
+
+ ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
+ if (ret) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: issue map_req failed: %d, region %d - %d\n",
+ __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
+ goto free_map_req;
+ }
+ return 0;
+
+free_map_req:
+ ufshpb_put_map_req(hpb, map_req);
+change_srgn_state:
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ srgn->srgn_state = state;
+unlock_out:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return err;
+}
+
+static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
+{
+ struct ufshpb_region *victim_rgn = NULL;
+ struct victim_select_info *lru_info = &hpb->lru_info;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ /*
+ * If the region is already on the LRU list, just move it to the
+ * MRU end of the list; the region is already in the active state.
+ */
+ if (!list_empty(&rgn->list_lru_rgn)) {
+ ufshpb_hit_lru_info(lru_info, rgn);
+ goto out;
+ }
+
+ if (rgn->rgn_state == HPB_RGN_INACTIVE) {
+ if (atomic_read(&lru_info->active_cnt) ==
+ lru_info->max_lru_active_cnt) {
+ /*
+ * If the maximum number of active regions
+ * is reached, evict the least recently used
+ * region. This can happen when the device
+ * delivers its eviction recommendations late.
+ * Evicting the LRU region is safe because the
+ * device can detect the eviction once the host
+ * stops issuing HPB_READ for that region.
+ *
+ * In host control mode, only admit the
+ * entering region if it has enough reads.
+ */
+ if (hpb->is_hcm &&
+ rgn->reads < hpb->params.eviction_thld_enter) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ victim_rgn = ufshpb_victim_lru_info(hpb);
+ if (!victim_rgn) {
+ dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+ "cannot get victim region %s\n",
+ hpb->is_hcm ? "" : "error");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+ "LRU full (%d), choose victim %d\n",
+ atomic_read(&lru_info->active_cnt),
+ victim_rgn->rgn_idx);
+
+ if (hpb->is_hcm) {
+ spin_unlock_irqrestore(&hpb->rgn_state_lock,
+ flags);
+ ret = ufshpb_issue_umap_single_req(hpb,
+ victim_rgn);
+ spin_lock_irqsave(&hpb->rgn_state_lock,
+ flags);
+ if (ret)
+ goto out;
+ }
+
+ __ufshpb_evict_region(hpb, victim_rgn);
+ }
+
+ /*
+ * When a region is added to the lru_info list, all of its
+ * subregions are guaranteed to have been assigned an mctx.
+ * If an allocation failed, the mctx is requested again
+ * without adding the region to the lru_info list.
+ */
+ ufshpb_add_lru_info(lru_info, rgn);
+ }
+out:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return ret;
+}
+
+static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
+ struct utp_hpb_rsp *rsp_field)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ int i, rgn_i, srgn_i;
+
+ BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
+ /*
+ * If the active region and the inactive region are the same,
+ * we will inactivate this region.
+ * The device can detect this (region inactivated) and
+ * will respond with the proper active region information.
+ */
+ for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
+ rgn_i =
+ be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
+ srgn_i =
+ be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
+
+ rgn = hpb->rgn_tbl + rgn_i;
+ if (hpb->is_hcm &&
+ (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
+ /*
+ * In host control mode, subregion activation
+ * recommendations are only honored for active regions.
+ * Also, ignore recommendations for dirty regions - the
+ * host makes decisions concerning those by itself.
+ */
+ continue;
+ }
+
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+ "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
+
+ spin_lock(&hpb->rsp_list_lock);
+ ufshpb_update_active_info(hpb, rgn_i, srgn_i);
+ spin_unlock(&hpb->rsp_list_lock);
+
+ srgn = rgn->srgn_tbl + srgn_i;
+
+ /* blocking HPB_READ */
+ spin_lock(&hpb->rgn_state_lock);
+ if (srgn->srgn_state == HPB_SRGN_VALID)
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ spin_unlock(&hpb->rgn_state_lock);
+ }
+
+ if (hpb->is_hcm) {
+ /*
+ * In host control mode the device is not allowed to
+ * inactivate regions.
+ */
+ goto out;
+ }
+
+ for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
+ rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+ "inactivate(%d) region %d\n", i, rgn_i);
+
+ spin_lock(&hpb->rsp_list_lock);
+ ufshpb_update_inactive_info(hpb, rgn_i);
+ spin_unlock(&hpb->rsp_list_lock);
+
+ rgn = hpb->rgn_tbl + rgn_i;
+
+ spin_lock(&hpb->rgn_state_lock);
+ if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+ for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
+ srgn = rgn->srgn_tbl + srgn_i;
+ if (srgn->srgn_state == HPB_SRGN_VALID)
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ }
+ }
+ spin_unlock(&hpb->rgn_state_lock);
+
+ }
+
+out:
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
+ rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
+
+ if (ufshpb_get_state(hpb) == HPB_PRESENT)
+ queue_work(ufshpb_wq, &hpb->map_work);
+}
+
+static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
+{
+ struct victim_select_info *lru_info = &hpb->lru_info;
+ struct ufshpb_region *rgn;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+ list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
+ set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
+
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+}
+
+/*
+ * This function parses the recommended active subregion information in the
+ * sense data field of a response UPIU with SAM_STAT_GOOD status.
+ */
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
+ struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
+ int data_seg_len;
+
+ if (unlikely(lrbp->lun != rsp_field->lun)) {
+ struct scsi_device *sdev;
+ bool found = false;
+
+ __shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+
+ if (!hpb)
+ continue;
+
+ if (rsp_field->lun == hpb->lun) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return;
+ }
+
+ if (!hpb)
+ return;
+
+ if (ufshpb_get_state(hpb) == HPB_INIT)
+ return;
+
+ if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
+ (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: ufshpb state is not PRESENT/SUSPEND\n",
+ __func__);
+ return;
+ }
+
+ data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+ & MASK_RSP_UPIU_DATA_SEG_LEN;
+
+ /* To flush the remaining rsp_list, queue the map_work task */
+ if (!data_seg_len) {
+ if (!ufshpb_is_general_lun(hpb->lun))
+ return;
+
+ ufshpb_kick_map_work(hpb);
+ return;
+ }
+
+ BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
+
+ if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
+ return;
+
+ hpb->stats.rb_noti_cnt++;
+
+ switch (rsp_field->hpb_op) {
+ case HPB_RSP_REQ_REGION_UPDATE:
+ if (data_seg_len != DEV_DATA_SEG_LEN)
+ dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: data seg length is not same.\n",
+ __func__);
+ ufshpb_rsp_req_region_update(hpb, rsp_field);
+ break;
+ case HPB_RSP_DEV_RESET:
+ dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+ "UFS device lost HPB information during PM.\n");
+
+ if (hpb->is_hcm) {
+ struct scsi_device *sdev;
+
+ __shost_for_each_device(sdev, hba->host) {
+ struct ufshpb_lu *h = sdev->hostdata;
+
+ if (h)
+ ufshpb_dev_reset_handler(h);
+ }
+ }
+
+ break;
+ default:
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "hpb_op is not available: %d\n",
+ rsp_field->hpb_op);
+ break;
+ }
+}
+
+static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn,
+ struct ufshpb_subregion *srgn)
+{
+ if (!list_empty(&rgn->list_inact_rgn))
+ return;
+
+ if (!list_empty(&srgn->list_act_srgn)) {
+ list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+ return;
+ }
+
+ list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+}
+
+static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn,
+ struct list_head *pending_list)
+{
+ struct ufshpb_subregion *srgn;
+ int srgn_idx;
+
+ if (!list_empty(&rgn->list_inact_rgn))
+ return;
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ if (!list_empty(&srgn->list_act_srgn))
+ return;
+
+ list_add_tail(&rgn->list_inact_rgn, pending_list);
+}
+
+static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
+ struct ufshpb_subregion,
+ list_act_srgn))) {
+ if (ufshpb_get_state(hpb) == HPB_SUSPEND)
+ break;
+
+ list_del_init(&srgn->list_act_srgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+ rgn = hpb->rgn_tbl + srgn->rgn_idx;
+ ret = ufshpb_add_region(hpb, rgn);
+ if (ret)
+ goto active_failed;
+
+ ret = ufshpb_issue_map_req(hpb, rgn, srgn);
+ if (ret) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "issue map_req failed. ret %d, region %d - %d\n",
+ ret, rgn->rgn_idx, srgn->srgn_idx);
+ goto active_failed;
+ }
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ return;
+
+active_failed:
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
+ rgn->rgn_idx, srgn->srgn_idx);
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_add_active_list(hpb, rgn, srgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
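+/*
+ * Drain lh_inact_rgn: try to evict each region; regions that cannot be
+ * evicted yet are parked on a local pending list and spliced back at the
+ * end so the next run retries them.
+ */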
+static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_region *rgn;
+ unsigned long flags;
+ int ret;
+ LIST_HEAD(pending_list);
+
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
+ struct ufshpb_region,
+ list_inact_rgn))) {
+ if (ufshpb_get_state(hpb) == HPB_SUSPEND)
+ break;
+
+ list_del_init(&rgn->list_inact_rgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+ ret = ufshpb_evict_region(hpb, rgn);
+ if (ret) {
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ }
+
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ }
+
+ list_splice(&pending_list, &hpb->lh_inact_rgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
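+/*
+ * Read-counter decay for host control mode: shift every subregion's read
+ * count right by the normalization factor, rebuild the region totals, and
+ * queue for inactivation any active region whose decayed count is zero.
+ */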
+static void ufshpb_normalization_work_handler(struct work_struct *work)
+{
+ struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
+ ufshpb_normalization_work);
+ int rgn_idx;
+ u8 factor = hpb->params.normalization_factor;
+
+ for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+ struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
+ int srgn_idx;
+
+ spin_lock(&rgn->rgn_lock);
+ rgn->reads = 0;
+ for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
+ struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
+
+ srgn->reads >>= factor;
+ rgn->reads += srgn->reads;
+ }
+ spin_unlock(&rgn->rgn_lock);
+
+ if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
+ continue;
+
+ /* if region is active but has no reads - inactivate it */
+ spin_lock(&hpb->rsp_list_lock);
+ ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
+ spin_unlock(&hpb->rsp_list_lock);
+ }
+}
+
+static void ufshpb_map_work_handler(struct work_struct *work)
+{
+ struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: ufshpb state is not PRESENT\n", __func__);
+ return;
+ }
+
+ ufshpb_run_inactive_region_list(hpb);
+ ufshpb_run_active_subregion_list(hpb);
+}
+
+/*
+ * This function does not need to hold any locks (rgn_state_lock,
+ * rsp_list_lock, etc.) because it is only called during initialization.
+ */
+static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
+ struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ struct ufshpb_subregion *srgn;
+ int srgn_idx, i;
+ int err = 0;
+
+ for_each_sub_region(rgn, srgn_idx, srgn) {
+ srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ if (!srgn->mctx) {
+ err = -ENOMEM;
+ dev_err(hba->dev,
+ "alloc mctx for pinned region failed\n");
+ goto release;
+ }
+
+ list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+ }
+
+ rgn->rgn_state = HPB_RGN_PINNED;
+ return 0;
+
+release:
+ for (i = 0; i < srgn_idx; i++) {
+ srgn = rgn->srgn_tbl + i;
+ ufshpb_put_map_ctx(hpb, srgn->mctx);
+ }
+ return err;
+}
+
+static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn, bool last)
+{
+ int srgn_idx;
+ struct ufshpb_subregion *srgn;
+
+ for_each_sub_region(rgn, srgn_idx, srgn) {
+ INIT_LIST_HEAD(&srgn->list_act_srgn);
+
+ srgn->rgn_idx = rgn->rgn_idx;
+ srgn->srgn_idx = srgn_idx;
+ srgn->srgn_state = HPB_SRGN_UNUSED;
+ }
+
+ if (unlikely(last && hpb->last_srgn_entries))
+ srgn->is_last = true;
+}
+
+static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn, int srgn_cnt)
+{
+ rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
+ GFP_KERNEL);
+ if (!rgn->srgn_tbl)
+ return -ENOMEM;
+
+ rgn->srgn_cnt = srgn_cnt;
+ return 0;
+}
+
+static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
+ struct ufshpb_lu *hpb,
+ struct ufshpb_dev_info *hpb_dev_info,
+ struct ufshpb_lu_info *hpb_lu_info)
+{
+ u32 entries_per_rgn;
+ u64 rgn_mem_size, tmp;
+
+ /* for pre_req */
+ hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
+
+ if (ufshpb_is_legacy(hba))
+ hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
+ else
+ hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
+
+ hpb->cur_read_id = 0;
+
+ hpb->lu_pinned_start = hpb_lu_info->pinned_start;
+ hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
+ (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
+ : PINNED_NOT_SET;
+ hpb->lru_info.max_lru_active_cnt =
+ hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
+
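+ /*
+ * One HPB_ENTRY_SIZE (8-byte) L2P entry maps each HPB_ENTRY_BLOCK_SIZE
+ * (4 KiB) logical block, so the map memory needed per (sub)region is
+ * its size in blocks times HPB_ENTRY_SIZE.
+ */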
+ rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
+ * HPB_ENTRY_SIZE;
+ do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
+ hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
+ * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
+
+ tmp = rgn_mem_size;
+ do_div(tmp, HPB_ENTRY_SIZE);
+ entries_per_rgn = (u32)tmp;
+ hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
+ hpb->entries_per_rgn_mask = entries_per_rgn - 1;
+
+ hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
+ hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
+ hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
+
+ tmp = rgn_mem_size;
+ do_div(tmp, hpb->srgn_mem_size);
+ hpb->srgns_per_rgn = (int)tmp;
+
+ hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
+ entries_per_rgn);
+ hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
+ (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
+ hpb->last_srgn_entries = hpb_lu_info->num_blocks
+ % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
+
+ hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
+
+ if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
+ hpb->is_hcm = true;
+}
+
+static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
+{
+ struct ufshpb_region *rgn_table, *rgn;
+ int rgn_idx, i;
+ int ret = 0;
+
+ rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
+ GFP_KERNEL);
+ if (!rgn_table)
+ return -ENOMEM;
+
+ hpb->rgn_tbl = rgn_table;
+
+ for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+ int srgn_cnt = hpb->srgns_per_rgn;
+ bool last_srgn = false;
+
+ rgn = rgn_table + rgn_idx;
+ rgn->rgn_idx = rgn_idx;
+
+ spin_lock_init(&rgn->rgn_lock);
+
+ INIT_LIST_HEAD(&rgn->list_inact_rgn);
+ INIT_LIST_HEAD(&rgn->list_lru_rgn);
+ INIT_LIST_HEAD(&rgn->list_expired_rgn);
+
+ if (rgn_idx == hpb->rgns_per_lu - 1) {
+ srgn_cnt = ((hpb->srgns_per_lu - 1) %
+ hpb->srgns_per_rgn) + 1;
+ last_srgn = true;
+ }
+
+ ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
+ if (ret)
+ goto release_srgn_table;
+ ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
+
+ if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
+ ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
+ if (ret)
+ goto release_srgn_table;
+ } else {
+ rgn->rgn_state = HPB_RGN_INACTIVE;
+ }
+
+ rgn->rgn_flags = 0;
+ rgn->hpb = hpb;
+ }
+
+ return 0;
+
+release_srgn_table:
+ for (i = 0; i < rgn_idx; i++)
+ kvfree(rgn_table[i].srgn_tbl);
+
+ kvfree(rgn_table);
+ return ret;
+}
+
+static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ int srgn_idx;
+ struct ufshpb_subregion *srgn;
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ if (srgn->srgn_state != HPB_SRGN_UNUSED) {
+ srgn->srgn_state = HPB_SRGN_UNUSED;
+ ufshpb_put_map_ctx(hpb, srgn->mctx);
+ }
+}
+
+static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
+{
+ int rgn_idx;
+
+ for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+ struct ufshpb_region *rgn;
+
+ rgn = hpb->rgn_tbl + rgn_idx;
+ if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+ rgn->rgn_state = HPB_RGN_INACTIVE;
+
+ ufshpb_destroy_subregion_tbl(hpb, rgn);
+ }
+
+ kvfree(rgn->srgn_tbl);
+ }
+
+ kvfree(hpb->rgn_tbl);
+}
+
+/* SYSFS functions */
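+/*
+ * ufshpb_sysfs_attr_show_func(name) expands to a name_show() handler that
+ * emits the matching u64 counter from struct ufshpb_stats, plus the
+ * DEVICE_ATTR_RO(name) used in hpb_dev_stat_attrs[] below.
+ */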
+#define ufshpb_sysfs_attr_show_func(__name) \
+static ssize_t __name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
+ \
+ if (!hpb) \
+ return -ENODEV; \
+ \
+ return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \
+} \
+\
+static DEVICE_ATTR_RO(__name)
+
+ufshpb_sysfs_attr_show_func(hit_cnt);
+ufshpb_sysfs_attr_show_func(miss_cnt);
+ufshpb_sysfs_attr_show_func(rb_noti_cnt);
+ufshpb_sysfs_attr_show_func(rb_active_cnt);
+ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
+ufshpb_sysfs_attr_show_func(map_req_cnt);
+ufshpb_sysfs_attr_show_func(umap_req_cnt);
+
+static struct attribute *hpb_dev_stat_attrs[] = {
+ &dev_attr_hit_cnt.attr,
+ &dev_attr_miss_cnt.attr,
+ &dev_attr_rb_noti_cnt.attr,
+ &dev_attr_rb_active_cnt.attr,
+ &dev_attr_rb_inactive_cnt.attr,
+ &dev_attr_map_req_cnt.attr,
+ &dev_attr_umap_req_cnt.attr,
+ NULL,
+};
+
+struct attribute_group ufs_sysfs_hpb_stat_group = {
+ .name = "hpb_stats",
+ .attrs = hpb_dev_stat_attrs,
+};
+
+/* SYSFS parameter functions */
+#define ufshpb_sysfs_param_show_func(__name) \
+static ssize_t __name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
+ \
+ if (!hpb) \
+ return -ENODEV; \
+ \
+ return sysfs_emit(buf, "%d\n", hpb->params.__name); \
+}
+
+ufshpb_sysfs_param_show_func(requeue_timeout_ms);
+static ssize_t
+requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0)
+ return -EINVAL;
+
+ hpb->params.requeue_timeout_ms = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(requeue_timeout_ms);
+
+ufshpb_sysfs_param_show_func(activation_thld);
+static ssize_t
+activation_thld_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ hpb->params.activation_thld = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(activation_thld);
+
+ufshpb_sysfs_param_show_func(normalization_factor);
+static ssize_t
+normalization_factor_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
+ return -EINVAL;
+
+ hpb->params.normalization_factor = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(normalization_factor);
+
+ufshpb_sysfs_param_show_func(eviction_thld_enter);
+static ssize_t
+eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= hpb->params.eviction_thld_exit)
+ return -EINVAL;
+
+ hpb->params.eviction_thld_enter = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(eviction_thld_enter);
+
+ufshpb_sysfs_param_show_func(eviction_thld_exit);
+static ssize_t
+eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= hpb->params.activation_thld)
+ return -EINVAL;
+
+ hpb->params.eviction_thld_exit = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(eviction_thld_exit);
+
+ufshpb_sysfs_param_show_func(read_timeout_ms);
+static ssize_t
+read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ /* read_timeout >> timeout_polling_interval */
+ if (val < hpb->params.timeout_polling_interval_ms * 2)
+ return -EINVAL;
+
+ hpb->params.read_timeout_ms = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(read_timeout_ms);
+
+ufshpb_sysfs_param_show_func(read_timeout_expiries);
+static ssize_t
+read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ hpb->params.read_timeout_expiries = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(read_timeout_expiries);
+
+ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
+static ssize_t
+timeout_polling_interval_ms_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ /* timeout_polling_interval << read_timeout */
+ if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
+ return -EINVAL;
+
+ hpb->params.timeout_polling_interval_ms = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_polling_interval_ms);
+
+ufshpb_sysfs_param_show_func(inflight_map_req);
+static ssize_t inflight_map_req_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
+ return -EINVAL;
+
+ hpb->params.inflight_map_req = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inflight_map_req);
+
+static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
+{
+ hpb->params.activation_thld = ACTIVATION_THRESHOLD;
+ hpb->params.normalization_factor = 1;
+ hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
+ hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
+ hpb->params.read_timeout_ms = READ_TO_MS;
+ hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
+ hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
+ hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
+}
+
+static struct attribute *hpb_dev_param_attrs[] = {
+ &dev_attr_requeue_timeout_ms.attr,
+ &dev_attr_activation_thld.attr,
+ &dev_attr_normalization_factor.attr,
+ &dev_attr_eviction_thld_enter.attr,
+ &dev_attr_eviction_thld_exit.attr,
+ &dev_attr_read_timeout_ms.attr,
+ &dev_attr_read_timeout_expiries.attr,
+ &dev_attr_timeout_polling_interval_ms.attr,
+ &dev_attr_inflight_map_req.attr,
+ NULL,
+};
+
+struct attribute_group ufs_sysfs_hpb_param_group = {
+ .name = "hpb_params",
+ .attrs = hpb_dev_param_attrs,
+};
+
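+/*
+ * Pre-allocate the write-buffer request pool: half the LU queue depth of
+ * ufshpb_req entries, each with a bio and a zeroed page, all strung on
+ * lh_pre_req_free for runtime use.
+ */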
+static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_req *pre_req = NULL, *t;
+ int qd = hpb->sdev_ufs_lu->queue_depth / 2;
+ int i;
+
+ INIT_LIST_HEAD(&hpb->lh_pre_req_free);
+
+ hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
+ hpb->throttle_pre_req = qd;
+ hpb->num_inflight_pre_req = 0;
+
+ if (!hpb->pre_req)
+ goto release_mem;
+
+ for (i = 0; i < qd; i++) {
+ pre_req = hpb->pre_req + i;
+ INIT_LIST_HEAD(&pre_req->list_req);
+ pre_req->req = NULL;
+
+ pre_req->bio = bio_alloc(GFP_KERNEL, 1);
+ if (!pre_req->bio)
+ goto release_mem;
+
+ pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pre_req->wb.m_page) {
+ bio_put(pre_req->bio);
+ goto release_mem;
+ }
+
+ list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
+ }
+
+ return 0;
+release_mem:
+ list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
+ list_del_init(&pre_req->list_req);
+ bio_put(pre_req->bio);
+ __free_page(pre_req->wb.m_page);
+ }
+
+ kfree(hpb->pre_req);
+ return -ENOMEM;
+}
+
+static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_req *pre_req = NULL;
+ int i;
+
+ for (i = 0; i < hpb->throttle_pre_req; i++) {
+ pre_req = hpb->pre_req + i;
+ bio_put(hpb->pre_req[i].bio);
+ /* only free pages that were actually allocated */
+ if (pre_req->wb.m_page)
+ __free_page(hpb->pre_req[i].wb.m_page);
+ list_del_init(&pre_req->list_req);
+ }
+
+ kfree(hpb->pre_req);
+}
+
+static void ufshpb_stat_init(struct ufshpb_lu *hpb)
+{
+ hpb->stats.hit_cnt = 0;
+ hpb->stats.miss_cnt = 0;
+ hpb->stats.rb_noti_cnt = 0;
+ hpb->stats.rb_active_cnt = 0;
+ hpb->stats.rb_inactive_cnt = 0;
+ hpb->stats.map_req_cnt = 0;
+ hpb->stats.umap_req_cnt = 0;
+}
+
+static void ufshpb_param_init(struct ufshpb_lu *hpb)
+{
+ hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
+ if (hpb->is_hcm)
+ ufshpb_hcm_param_init(hpb);
+}
+
+static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
+{
+ int ret;
+
+ spin_lock_init(&hpb->rgn_state_lock);
+ spin_lock_init(&hpb->rsp_list_lock);
+ spin_lock_init(&hpb->param_lock);
+
+ INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
+ INIT_LIST_HEAD(&hpb->lh_act_srgn);
+ INIT_LIST_HEAD(&hpb->lh_inact_rgn);
+ INIT_LIST_HEAD(&hpb->list_hpb_lu);
+
+ INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
+ if (hpb->is_hcm) {
+ INIT_WORK(&hpb->ufshpb_normalization_work,
+ ufshpb_normalization_work_handler);
+ INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
+ ufshpb_read_to_handler);
+ }
+
+ hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
+ sizeof(struct ufshpb_req), 0, 0, NULL);
+ if (!hpb->map_req_cache) {
+ dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
+ hpb->lun);
+ return -ENOMEM;
+ }
+
+ hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
+ sizeof(struct page *) * hpb->pages_per_srgn,
+ 0, 0, NULL);
+ if (!hpb->m_page_cache) {
+ dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
+ hpb->lun);
+ ret = -ENOMEM;
+ goto release_req_cache;
+ }
+
+ ret = ufshpb_pre_req_mempool_init(hpb);
+ if (ret) {
+ dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
+ hpb->lun);
+ goto release_m_page_cache;
+ }
+
+ ret = ufshpb_alloc_region_tbl(hba, hpb);
+ if (ret)
+ goto release_pre_req_mempool;
+
+ ufshpb_stat_init(hpb);
+ ufshpb_param_init(hpb);
+
+ if (hpb->is_hcm) {
+ unsigned int poll;
+
+ poll = hpb->params.timeout_polling_interval_ms;
+ schedule_delayed_work(&hpb->ufshpb_read_to_work,
+ msecs_to_jiffies(poll));
+ }
+
+ return 0;
+
+release_pre_req_mempool:
+ ufshpb_pre_req_mempool_destroy(hpb);
+release_m_page_cache:
+ kmem_cache_destroy(hpb->m_page_cache);
+release_req_cache:
+ kmem_cache_destroy(hpb->map_req_cache);
+ return ret;
+}
+
+static struct ufshpb_lu *
+ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
+ struct ufshpb_dev_info *hpb_dev_info,
+ struct ufshpb_lu_info *hpb_lu_info)
+{
+ struct ufshpb_lu *hpb;
+ int ret;
+
+ hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
+ if (!hpb)
+ return NULL;
+
+ hpb->lun = sdev->lun;
+ hpb->sdev_ufs_lu = sdev;
+
+ ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
+
+ ret = ufshpb_lu_hpb_init(hba, hpb);
+ if (ret) {
+ dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
+ goto release_hpb;
+ }
+
+ sdev->hostdata = hpb;
+ return hpb;
+
+release_hpb:
+ kfree(hpb);
+ return NULL;
+}
+
+static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_region *rgn, *next_rgn;
+ struct ufshpb_subregion *srgn, *next_srgn;
+ unsigned long flags;
+
+ /*
+ * If a device reset occurred, the remaining HPB region information
+ * may be stale. Discard the HPB response lists that survived the
+ * reset to prevent unnecessary work.
+ */
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
+ list_inact_rgn)
+ list_del_init(&rgn->list_inact_rgn);
+
+ list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
+ list_act_srgn)
+ list_del_init(&srgn->list_act_srgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
+static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
+{
+ if (hpb->is_hcm) {
+ cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
+ cancel_work_sync(&hpb->ufshpb_normalization_work);
+ }
+ cancel_work_sync(&hpb->map_work);
+}
+
+static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
+{
+ int err = 0;
+ bool flag_res = true;
+ int try;
+
+ /* wait for the device to complete HPB reset query */
+ for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
+ dev_dbg(hba->dev,
+ "%s start flag reset polling %d times\n",
+ __func__, try);
+
+ /* Poll fHpbReset flag to be cleared */
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s reading fHpbReset flag failed with error %d\n",
+ __func__, err);
+ return flag_res;
+ }
+
+ if (!flag_res)
+ goto out;
+
+ usleep_range(1000, 1100);
+ }
+ if (flag_res) {
+ dev_err(hba->dev,
+ "%s fHpbReset was not cleared by the device\n",
+ __func__);
+ }
+out:
+ return flag_res;
+}
+
+void ufshpb_reset(struct ufs_hba *hba)
+{
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if (ufshpb_get_state(hpb) != HPB_RESET)
+ continue;
+
+ ufshpb_set_state(hpb, HPB_PRESENT);
+ }
+}
+
+void ufshpb_reset_host(struct ufs_hba *hba)
+{
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT)
+ continue;
+ ufshpb_set_state(hpb, HPB_RESET);
+ ufshpb_cancel_jobs(hpb);
+ ufshpb_discard_rsp_lists(hpb);
+ }
+}
+
+void ufshpb_suspend(struct ufs_hba *hba)
+{
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT)
+ continue;
+ ufshpb_set_state(hpb, HPB_SUSPEND);
+ ufshpb_cancel_jobs(hpb);
+ }
+}
+
+void ufshpb_resume(struct ufs_hba *hba)
+{
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
+ (ufshpb_get_state(hpb) != HPB_SUSPEND))
+ continue;
+ ufshpb_set_state(hpb, HPB_PRESENT);
+ ufshpb_kick_map_work(hpb);
+ if (hpb->is_hcm) {
+ unsigned int poll =
+ hpb->params.timeout_polling_interval_ms;
+
+ schedule_delayed_work(&hpb->ufshpb_read_to_work,
+ msecs_to_jiffies(poll));
+ }
+ }
+}
+
+static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
+ struct ufshpb_lu_info *hpb_lu_info)
+{
+ u16 max_active_rgns;
+ u8 lu_enable;
+ int size;
+ int ret;
+ char desc_buf[QUERY_DESC_MAX_SIZE];
+
+ ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
+
+ pm_runtime_get_sync(hba->dev);
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ QUERY_DESC_IDN_UNIT, lun, 0,
+ desc_buf, &size);
+ pm_runtime_put_sync(hba->dev);
+
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: idn: %d lun: %d query request failed",
+ __func__, QUERY_DESC_IDN_UNIT, lun);
+ return ret;
+ }
+
+ lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
+ if (lu_enable != LU_ENABLED_HPB_FUNC)
+ return -ENODEV;
+
+ max_active_rgns = get_unaligned_be16(
+ desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
+ if (!max_active_rgns) {
+ dev_err(hba->dev,
+ "lun %d wrong number of max active regions\n", lun);
+ return -ENODEV;
+ }
+
+ hpb_lu_info->num_blocks = get_unaligned_be64(
+ desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
+ hpb_lu_info->pinned_start = get_unaligned_be16(
+ desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
+ hpb_lu_info->num_pinned = get_unaligned_be16(
+ desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
+ hpb_lu_info->max_active_rgns = max_active_rgns;
+
+ return 0;
+}
+
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+
+ if (!hpb)
+ return;
+
+ ufshpb_set_state(hpb, HPB_FAILED);
+
+ sdev = hpb->sdev_ufs_lu;
+ sdev->hostdata = NULL;
+
+ ufshpb_cancel_jobs(hpb);
+
+ ufshpb_pre_req_mempool_destroy(hpb);
+ ufshpb_destroy_region_tbl(hpb);
+
+ kmem_cache_destroy(hpb->map_req_cache);
+ kmem_cache_destroy(hpb->m_page_cache);
+
+ list_del_init(&hpb->list_hpb_lu);
+
+ kfree(hpb);
+}
+
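+/*
+ * Called once the last LU has been probed (slave_conf_cnt reaches zero):
+ * shrink the mempools to what the active subregions actually need, then
+ * either bring every HPB LU to PRESENT or tear HPB down if the fHpbReset
+ * poll failed.
+ */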
+static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
+{
+ int pool_size;
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+ bool init_success;
+
+ if (tot_active_srgn_pages == 0) {
+ ufshpb_remove(hba);
+ return;
+ }
+
+ init_success = !ufshpb_check_hpb_reset_query(hba);
+
+ pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+ if (pool_size > tot_active_srgn_pages) {
+ mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
+ mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
+ }
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if (init_success) {
+ ufshpb_set_state(hpb, HPB_PRESENT);
+ if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
+ queue_work(ufshpb_wq, &hpb->map_work);
+ if (!hpb->is_hcm)
+ ufshpb_issue_umap_all_req(hpb);
+ } else {
+ dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
+ ufshpb_destroy_lu(hba, sdev);
+ }
+ }
+
+ if (!init_success)
+ ufshpb_remove(hba);
+}
+
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ struct ufshpb_lu *hpb;
+ int ret;
+ struct ufshpb_lu_info hpb_lu_info = { 0 };
+ int lun = sdev->lun;
+
+ if (lun >= hba->dev_info.max_lu_supported)
+ goto out;
+
+ ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
+ if (ret)
+ goto out;
+
+ hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
+ &hpb_lu_info);
+ if (!hpb)
+ goto out;
+
+ tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
+ hpb->srgns_per_rgn * hpb->pages_per_srgn;
+
+out:
+ /* Proceed once every LU has finished initialization */
+ if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
+ ufshpb_hpb_lu_prepared(hba);
+}
+
+static int ufshpb_init_mem_wq(struct ufs_hba *hba)
+{
+ int ret;
+ unsigned int pool_size;
+
+ ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
+ sizeof(struct ufshpb_map_ctx),
+ 0, 0, NULL);
+ if (!ufshpb_mctx_cache) {
+ dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
+ return -ENOMEM;
+ }
+
+ pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+ dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
+ __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
+
+ ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
+ ufshpb_mctx_cache);
+ if (!ufshpb_mctx_pool) {
+ dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
+ ret = -ENOMEM;
+ goto release_mctx_cache;
+ }
+
+ ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
+ if (!ufshpb_page_pool) {
+ dev_err(hba->dev, "ufshpb: cannot init page pool\n");
+ ret = -ENOMEM;
+ goto release_mctx_pool;
+ }
+
+ ufshpb_wq = alloc_workqueue("ufshpb-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ if (!ufshpb_wq) {
+ dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
+ ret = -ENOMEM;
+ goto release_page_pool;
+ }
+
+ return 0;
+
+release_page_pool:
+ mempool_destroy(ufshpb_page_pool);
+release_mctx_pool:
+ mempool_destroy(ufshpb_mctx_pool);
+release_mctx_cache:
+ kmem_cache_destroy(ufshpb_mctx_cache);
+ return ret;
+}
+
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
+{
+ struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
+ int max_active_rgns = 0;
+ int hpb_num_lu;
+
+ hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
+ if (hpb_num_lu == 0) {
+ dev_err(hba->dev, "No HPB LU supported\n");
+ hpb_info->hpb_disabled = true;
+ return;
+ }
+
+ hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
+ hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
+ max_active_rgns = get_unaligned_be16(geo_buf +
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
+
+ if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
+ max_active_rgns == 0) {
+ dev_err(hba->dev, "No HPB supported device\n");
+ hpb_info->hpb_disabled = true;
+ return;
+ }
+}
+
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
+ int version, ret;
+ u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
+
+ hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
+
+ version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
+ if ((version != HPB_SUPPORT_VERSION) &&
+ (version != HPB_SUPPORT_LEGACY_VERSION)) {
+ dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
+ __func__, version);
+ hpb_dev_info->hpb_disabled = true;
+ return;
+ }
+
+ if (version == HPB_SUPPORT_LEGACY_VERSION)
+ hpb_dev_info->is_legacy = true;
+
+ pm_runtime_get_sync(hba->dev);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
+ pm_runtime_put_sync(hba->dev);
+
+ if (ret)
+ dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
+ __func__);
+ hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
+
+ /*
+ * Get the number of user logical units so we can check whether all
+ * scsi_device instances have finished initialization.
+ */
+ hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
+}
+
+void ufshpb_init(struct ufs_hba *hba)
+{
+ struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
+ int try;
+ int ret;
+
+ if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
+ return;
+
+ if (ufshpb_init_mem_wq(hba)) {
+ hpb_dev_info->hpb_disabled = true;
+ return;
+ }
+
+ atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
+ tot_active_srgn_pages = 0;
+ /* issue HPB reset query */
+ for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
+ ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
+ if (!ret)
+ break;
+ }
+}
+
+void ufshpb_remove(struct ufs_hba *hba)
+{
+ mempool_destroy(ufshpb_page_pool);
+ mempool_destroy(ufshpb_mctx_pool);
+ kmem_cache_destroy(ufshpb_mctx_cache);
+
+ destroy_workqueue(ufshpb_wq);
+}
+
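+/*
+ * Host map memory budget; settable at module load or at runtime via
+ * /sys/module/<module>/parameters/ufshpb_host_map_kbytes. The mctx and
+ * page mempools are sized from it in ufshpb_init_mem_wq() and shrunk in
+ * ufshpb_hpb_lu_prepared() when fewer pages are needed.
+ */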
+module_param(ufshpb_host_map_kbytes, uint, 0644);
+MODULE_PARM_DESC(ufshpb_host_map_kbytes,
+ "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
new file mode 100644
index 000000000000..a79e07398970
--- /dev/null
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Yongmyung Lee <ymhungry.lee@samsung.com>
+ * Jinyoung Choi <j-young.choi@samsung.com>
+ */
+
+#ifndef _UFSHPB_H_
+#define _UFSHPB_H_
+
+/* hpb response UPIU macros */
+#define HPB_RSP_NONE 0x0
+#define HPB_RSP_REQ_REGION_UPDATE 0x1
+#define HPB_RSP_DEV_RESET 0x2
+#define MAX_ACTIVE_NUM 2
+#define MAX_INACTIVE_NUM 2
+#define DEV_DATA_SEG_LEN 0x14
+#define DEV_SENSE_SEG_LEN 0x12
+#define DEV_DES_TYPE 0x80
+#define DEV_ADDITIONAL_LEN 0x10
+
+/* hpb map & entries macro */
+#define HPB_RGN_SIZE_UNIT 512
+#define HPB_ENTRY_BLOCK_SIZE 4096
+#define HPB_ENTRY_SIZE 0x8
+#define PINNED_NOT_SET U32_MAX
+
+/* hpb support chunk size */
+#define HPB_LEGACY_CHUNK_HIGH 1
+#define HPB_MULTI_CHUNK_LOW 7
+#define HPB_MULTI_CHUNK_HIGH 255
+
+/* hpb vendor-defined opcodes */
+#define UFSHPB_READ 0xF8
+#define UFSHPB_READ_BUFFER 0xF9
+#define UFSHPB_READ_BUFFER_ID 0x01
+#define UFSHPB_WRITE_BUFFER 0xFA
+#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID 0x01
+#define UFSHPB_WRITE_BUFFER_PREFETCH_ID 0x02
+#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID 0x03
+#define HPB_WRITE_BUFFER_CMD_LENGTH 10
+#define MAX_HPB_READ_ID 0x7F
+#define HPB_READ_BUFFER_CMD_LENGTH 10
+#define LU_ENABLED_HPB_FUNC 0x02
+
+#define HPB_RESET_REQ_RETRIES 10
+#define HPB_MAP_REQ_RETRIES 5
+#define HPB_REQUEUE_TIME_MS 0
+
+#define HPB_SUPPORT_VERSION 0x200
+#define HPB_SUPPORT_LEGACY_VERSION 0x100
+
+enum UFSHPB_MODE {
+ HPB_HOST_CONTROL,
+ HPB_DEVICE_CONTROL,
+};
+
+enum UFSHPB_STATE {
+ HPB_INIT = 0,
+ HPB_PRESENT = 1,
+ HPB_SUSPEND,
+ HPB_FAILED,
+ HPB_RESET,
+};
+
+enum HPB_RGN_STATE {
+ HPB_RGN_INACTIVE,
+ HPB_RGN_ACTIVE,
+ /* pinned regions are always active */
+ HPB_RGN_PINNED,
+};
+
+enum HPB_SRGN_STATE {
+ HPB_SRGN_UNUSED,
+ HPB_SRGN_INVALID,
+ HPB_SRGN_VALID,
+ HPB_SRGN_ISSUED,
+};
+
+/**
+ * struct ufshpb_lu_info - UFSHPB logical unit related info
+ * @num_blocks: the number of logical blocks
+ * @pinned_start: the start region number of the pinned region
+ * @num_pinned: the number of pinned regions
+ * @max_active_rgns: maximum number of active regions
+ */
+struct ufshpb_lu_info {
+ int num_blocks;
+ int pinned_start;
+ int num_pinned;
+ int max_active_rgns;
+};
+
+struct ufshpb_map_ctx {
+ struct page **m_page;
+ unsigned long *ppn_dirty;
+};
+
+struct ufshpb_subregion {
+ struct ufshpb_map_ctx *mctx;
+ enum HPB_SRGN_STATE srgn_state;
+ int rgn_idx;
+ int srgn_idx;
+ bool is_last;
+
+ /* subregion reads - for host mode */
+ unsigned int reads;
+
+ /* below information is used by rsp_list */
+ struct list_head list_act_srgn;
+};
+
+struct ufshpb_region {
+ struct ufshpb_lu *hpb;
+ struct ufshpb_subregion *srgn_tbl;
+ enum HPB_RGN_STATE rgn_state;
+ int rgn_idx;
+ int srgn_cnt;
+
+ /* below information is used by rsp_list */
+ struct list_head list_inact_rgn;
+
+ /* below information is used by lru */
+ struct list_head list_lru_rgn;
+ unsigned long rgn_flags;
+#define RGN_FLAG_DIRTY 0
+#define RGN_FLAG_UPDATE 1
+
+ /* region reads - for host mode */
+ spinlock_t rgn_lock;
+ unsigned int reads;
+ /* region "cold" timer - for host mode */
+ ktime_t read_timeout;
+ unsigned int read_timeout_expiries;
+ struct list_head list_expired_rgn;
+};
+
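+/*
+ * Iterate over the subregions of @rgn: @i is the index, @srgn the cursor.
+ * The cursor assignment is folded into the loop condition so the body
+ * never sees an out-of-range entry.
+ */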
+#define for_each_sub_region(rgn, i, srgn) \
+ for ((i) = 0; \
+ ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \
+ (i)++)
+
+/**
+ * struct ufshpb_req - HPB related request structure (write/read buffer)
+ * @req: block layer request structure
+ * @bio: bio for this request
+ * @hpb: ufshpb_lu structure this request belongs to
+ * @list_req: ufshpb_req mempool list
+ * @mctx: L2P map information
+ * @rgn_idx: target region index
+ * @srgn_idx: target sub-region index
+ * @lun: target logical unit number
+ * @m_page: L2P map information data for pre-request
+ * @len: length of host-side cached L2P map in m_page
+ * @lpn: start LPN of L2P map in m_page
+ */
+struct ufshpb_req {
+ struct request *req;
+ struct bio *bio;
+ struct ufshpb_lu *hpb;
+ struct list_head list_req;
+ union {
+ struct {
+ struct ufshpb_map_ctx *mctx;
+ unsigned int rgn_idx;
+ unsigned int srgn_idx;
+ unsigned int lun;
+ } rb;
+ struct {
+ struct page *m_page;
+ unsigned int len;
+ unsigned long lpn;
+ } wb;
+ };
+};
+
+struct victim_select_info {
+ struct list_head lh_lru_rgn; /* LRU list of regions */
+ int max_lru_active_cnt; /* supported hpb #region - pinned #region */
+ atomic_t active_cnt;
+};
+
+/**
+ * struct ufshpb_params - ufs hpb parameters
+ * @requeue_timeout_ms: requeue threshold of a write buffer command (0x2)
+ * @activation_thld: min reads [IOs] to activate/update a region
+ * @normalization_factor: shift right the region's reads
+ * @eviction_thld_enter: min reads [IOs] for the entering region in eviction
+ * @eviction_thld_exit: max reads [IOs] for the exiting region in eviction
+ * @read_timeout_ms: timeout [ms] from the last read IO to the region
+ * @read_timeout_expiries: number of allowed timeout expiries
+ * @timeout_polling_interval_ms: frequency at which timeouts are checked
+ * @inflight_map_req: number of inflight map requests
+ */
+struct ufshpb_params {
+ unsigned int requeue_timeout_ms;
+ unsigned int activation_thld;
+ unsigned int normalization_factor;
+ unsigned int eviction_thld_enter;
+ unsigned int eviction_thld_exit;
+ unsigned int read_timeout_ms;
+ unsigned int read_timeout_expiries;
+ unsigned int timeout_polling_interval_ms;
+ unsigned int inflight_map_req;
+};
+
+struct ufshpb_stats {
+ u64 hit_cnt;
+ u64 miss_cnt;
+ u64 rb_noti_cnt;
+ u64 rb_active_cnt;
+ u64 rb_inactive_cnt;
+ u64 map_req_cnt;
+ u64 pre_req_cnt;
+ u64 umap_req_cnt;
+};
+
+struct ufshpb_lu {
+ int lun;
+ struct scsi_device *sdev_ufs_lu;
+
+ spinlock_t rgn_state_lock; /* protects rgn/srgn state */
+ struct ufshpb_region *rgn_tbl;
+
+ atomic_t hpb_state;
+
+ spinlock_t rsp_list_lock;
+ struct list_head lh_act_srgn; /* hold rsp_list_lock */
+ struct list_head lh_inact_rgn; /* hold rsp_list_lock */
+
+ /* pre request information */
+ struct ufshpb_req *pre_req;
+ int num_inflight_pre_req;
+ int throttle_pre_req;
+ int num_inflight_map_req; /* hold param_lock */
+ spinlock_t param_lock;
+
+ struct list_head lh_pre_req_free;
+ int cur_read_id;
+ int pre_req_min_tr_len;
+ int pre_req_max_tr_len;
+
+ /* cached L2P map management worker */
+ struct work_struct map_work;
+
+ /* for selecting victim */
+ struct victim_select_info lru_info;
+ struct work_struct ufshpb_normalization_work;
+ struct delayed_work ufshpb_read_to_work;
+ unsigned long work_data_bits;
+#define TIMEOUT_WORK_RUNNING 0
+
+ /* pinned region information */
+ u32 lu_pinned_start;
+ u32 lu_pinned_end;
+
+ /* HPB related configuration */
+ u32 rgns_per_lu;
+ u32 srgns_per_lu;
+ u32 last_srgn_entries;
+ int srgns_per_rgn;
+ u32 srgn_mem_size;
+ u32 entries_per_rgn_mask;
+ u32 entries_per_rgn_shift;
+ u32 entries_per_srgn;
+ u32 entries_per_srgn_mask;
+ u32 entries_per_srgn_shift;
+ u32 pages_per_srgn;
+
+ bool is_hcm;
+
+ struct ufshpb_stats stats;
+ struct ufshpb_params params;
+
+ struct kmem_cache *map_req_cache;
+ struct kmem_cache *m_page_cache;
+
+ struct list_head list_hpb_lu;
+};
+
+struct ufs_hba;
+struct ufshcd_lrb;
+
+#ifndef CONFIG_SCSI_UFS_HPB
+static int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0; }
+static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
+static void ufshpb_resume(struct ufs_hba *hba) {}
+static void ufshpb_suspend(struct ufs_hba *hba) {}
+static void ufshpb_reset(struct ufs_hba *hba) {}
+static void ufshpb_reset_host(struct ufs_hba *hba) {}
+static void ufshpb_init(struct ufs_hba *hba) {}
+static void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static void ufshpb_remove(struct ufs_hba *hba) {}
+static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; }
+static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {}
+static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {}
+static bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; }
+#else
+int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_resume(struct ufs_hba *hba);
+void ufshpb_suspend(struct ufs_hba *hba);
+void ufshpb_reset(struct ufs_hba *hba);
+void ufshpb_reset_host(struct ufs_hba *hba);
+void ufshpb_init(struct ufs_hba *hba);
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_remove(struct ufs_hba *hba);
+bool ufshpb_is_allowed(struct ufs_hba *hba);
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf);
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf);
+bool ufshpb_is_legacy(struct ufs_hba *hba);
+extern struct attribute_group ufs_sysfs_hpb_stat_group;
+extern struct attribute_group ufs_sysfs_hpb_param_group;
+#endif
+
+#endif /* End of Header */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index b0deaf4af5a3..c25ce8f0e0af 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -519,7 +519,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
struct virtio_scsi_cmd_req_pi *cmd_pi,
struct scsi_cmnd *sc)
{
- struct request *rq = sc->request;
+ struct request *rq = scsi_cmd_to_rq(sc);
struct blk_integrity *bi;
virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
@@ -543,7 +543,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
struct scsi_cmnd *sc)
{
- u32 tag = blk_mq_unique_tag(sc->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
u16 hwq = blk_mq_unique_tag_to_hwq(tag);
return &vscsi->req_vqs[hwq];
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index ec9d399fbbd8..0204e314b482 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -212,7 +212,7 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
- ring_req->timeout_per_command = sc->request->timeout / HZ;
+ ring_req->timeout_per_command = scsi_cmd_to_rq(sc)->timeout / HZ;
for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
ring_req->seg[i] = shadow->seg[i];
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index c163b14774d7..72171ea3dd53 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -5,7 +5,7 @@ menuconfig TARGET_CORE
depends on BLOCK
select CONFIGFS_FS
select CRC_T10DIF
- select BLK_SCSI_REQUEST
+ select SCSI_COMMON
select SGL_ALLOC
default n
help
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index b044999ad002..072afd070f3e 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -234,7 +234,7 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
- int ret = -EINVAL;
+ int ret;
if ((!ccmd->setup_ddp) ||
(!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 6d0b0e67e79e..3dfc7ed79ba4 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -183,7 +183,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
memset(tl_cmd, 0, sizeof(*tl_cmd));
tl_cmd->sc = sc;
- tl_cmd->sc_cmd_tag = sc->request->tag;
+ tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;
tcm_loop_target_queue_cmd(tl_cmd);
return 0;
@@ -241,7 +241,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
- int ret = FAILED;
+ int ret;
/*
* Locate the tcm_loop_hba_t pointer
@@ -249,7 +249,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
- sc->request->tag, TMR_ABORT_TASK);
+ scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
@@ -261,7 +261,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
- int ret = FAILED;
+ int ret;
/*
* Locate the tcm_loop_hba_t pointer
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 4d3ceee23622..b9f9fb5d7e63 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1389,8 +1389,8 @@ static void sbp_sense_mangle(struct sbp_target_request *req)
(sense[0] & 0x80) | /* valid */
((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
(sense[2] & 0x0f); /* sense_key */
- status[2] = se_cmd->scsi_asc; /* sense_code */
- status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
+ status[2] = 0; /* XXX sense_code */
+ status[3] = 0; /* XXX sense_qualifier */
/* information */
status[4] = sense[3];
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 3bb921345bce..cb1de1ecaaa6 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -428,22 +428,6 @@ out:
return rc;
}
-static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
-{
- /*
- * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
- * The ALUA additional sense code qualifier (ASCQ) is determined
- * by the ALUA primary or secondary access state..
- */
- pr_debug("[%s]: ALUA TG Port not available, "
- "SenseKey: NOT_READY, ASC/ASCQ: "
- "0x04/0x%02x\n",
- cmd->se_tfo->fabric_name, alua_ascq);
-
- cmd->scsi_asc = 0x04;
- cmd->scsi_ascq = alua_ascq;
-}
-
static inline void core_alua_state_nonoptimized(
struct se_cmd *cmd,
unsigned char *cdb,
@@ -458,9 +442,9 @@ static inline void core_alua_state_nonoptimized(
cmd->alua_nonop_delay = nonop_delay_msecs;
}
-static inline int core_alua_state_lba_dependent(
+static inline sense_reason_t core_alua_state_lba_dependent(
struct se_cmd *cmd,
- struct t10_alua_tg_pt_gp *tg_pt_gp)
+ u16 tg_pt_gp_id)
{
struct se_device *dev = cmd->se_dev;
u64 segment_size, segment_mult, sectors, lba;
@@ -506,23 +490,19 @@ static inline int core_alua_state_lba_dependent(
}
if (!cur_map) {
spin_unlock(&dev->t10_alua.lba_map_lock);
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
lba_map_mem_list) {
- if (map_mem->lba_map_mem_alua_pg_id !=
- tg_pt_gp->tg_pt_gp_id)
+ if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id)
continue;
switch(map_mem->lba_map_mem_alua_state) {
case ALUA_ACCESS_STATE_STANDBY:
spin_unlock(&dev->t10_alua.lba_map_lock);
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
case ALUA_ACCESS_STATE_UNAVAILABLE:
spin_unlock(&dev->t10_alua.lba_map_lock);
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
default:
break;
}
@@ -532,7 +512,7 @@ static inline int core_alua_state_lba_dependent(
return 0;
}
-static inline int core_alua_state_standby(
+static inline sense_reason_t core_alua_state_standby(
struct se_cmd *cmd,
unsigned char *cdb)
{
@@ -556,24 +536,21 @@ static inline int core_alua_state_standby(
case SAI_READ_CAPACITY_16:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
case REQUEST_SENSE:
case PERSISTENT_RESERVE_IN:
@@ -582,14 +559,13 @@ static inline int core_alua_state_standby(
case WRITE_BUFFER:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
return 0;
}
-static inline int core_alua_state_unavailable(
+static inline sense_reason_t core_alua_state_unavailable(
struct se_cmd *cmd,
unsigned char *cdb)
{
@@ -606,30 +582,27 @@ static inline int core_alua_state_unavailable(
case MI_REPORT_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
return 0;
}
-static inline int core_alua_state_transition(
+static inline sense_reason_t core_alua_state_transition(
struct se_cmd *cmd,
unsigned char *cdb)
{
@@ -646,16 +619,14 @@ static inline int core_alua_state_transition(
case MI_REPORT_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
- return 1;
+ return TCM_ALUA_STATE_TRANSITION;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
- return 1;
+ return TCM_ALUA_STATE_TRANSITION;
}
return 0;
@@ -674,6 +645,8 @@ target_alua_state_check(struct se_cmd *cmd)
struct se_lun *lun = cmd->se_lun;
struct t10_alua_tg_pt_gp *tg_pt_gp;
int out_alua_state, nonop_delay_msecs;
+ u16 tg_pt_gp_id;
+ sense_reason_t rc = TCM_NO_SENSE;
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
@@ -687,8 +660,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
- set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
- return TCM_CHECK_CONDITION_NOT_READY;
+ return TCM_ALUA_OFFLINE;
}
if (!lun->lun_tg_pt_gp)
@@ -698,8 +670,8 @@ target_alua_state_check(struct se_cmd *cmd)
tg_pt_gp = lun->lun_tg_pt_gp;
out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+ tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
- // XXX: keeps using tg_pt_gp witout reference after unlock
spin_unlock(&lun->lun_tg_pt_gp_lock);
/*
* Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
@@ -715,20 +687,16 @@ target_alua_state_check(struct se_cmd *cmd)
core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
break;
case ALUA_ACCESS_STATE_STANDBY:
- if (core_alua_state_standby(cmd, cdb))
- return TCM_CHECK_CONDITION_NOT_READY;
+ rc = core_alua_state_standby(cmd, cdb);
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- if (core_alua_state_unavailable(cmd, cdb))
- return TCM_CHECK_CONDITION_NOT_READY;
+ rc = core_alua_state_unavailable(cmd, cdb);
break;
case ALUA_ACCESS_STATE_TRANSITION:
- if (core_alua_state_transition(cmd, cdb))
- return TCM_CHECK_CONDITION_NOT_READY;
+ rc = core_alua_state_transition(cmd, cdb);
break;
case ALUA_ACCESS_STATE_LBA_DEPENDENT:
- if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
- return TCM_CHECK_CONDITION_NOT_READY;
+ rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id);
break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
@@ -738,10 +706,16 @@ target_alua_state_check(struct se_cmd *cmd)
default:
pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
- return TCM_INVALID_CDB_FIELD;
+ rc = TCM_INVALID_CDB_FIELD;
}
- return 0;
+ if (rc && rc != TCM_INVALID_CDB_FIELD) {
+ pr_debug("[%s]: ALUA TG Port not available, "
+ "SenseKey: NOT_READY, ASC/rc: 0x04/%d\n",
+ cmd->se_tfo->fabric_name, rc);
+ }
+
+ return rc;
}
/*
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 44d9d028f716..4069a1edcfa3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -83,7 +83,7 @@ static int iblock_configure_device(struct se_device *dev)
struct blk_integrity *bi;
fmode_t mode;
unsigned int max_write_zeroes_sectors;
- int ret = -ENOMEM;
+ int ret;
if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
pr_err("Missing udev_path= parameters for IBLOCK\n");
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 26ceabe34de5..c26b3afc4575 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2003,7 +2003,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_ADDRESS_OUT_OF_RANGE:
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
- case TCM_CHECK_CONDITION_NOT_READY:
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
@@ -2013,6 +2012,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_TOO_MANY_SEGMENT_DESCS:
case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
case TCM_INVALID_FIELD_IN_COMMAND_IU:
+ case TCM_ALUA_TG_PT_STANDBY:
+ case TCM_ALUA_TG_PT_UNAVAILABLE:
+ case TCM_ALUA_STATE_TRANSITION:
+ case TCM_ALUA_OFFLINE:
break;
case TCM_OUT_OF_RESOURCES:
cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
@@ -3277,9 +3280,6 @@ static const struct sense_detail sense_detail_table[] = {
[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
.key = UNIT_ATTENTION,
},
- [TCM_CHECK_CONDITION_NOT_READY] = {
- .key = NOT_READY,
- },
[TCM_MISCOMPARE_VERIFY] = {
.key = MISCOMPARE,
.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
@@ -3340,6 +3340,26 @@ static const struct sense_detail sense_detail_table[] = {
.asc = 0x0e,
.ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
},
+ [TCM_ALUA_TG_PT_STANDBY] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
+ },
+ [TCM_ALUA_TG_PT_UNAVAILABLE] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
+ },
+ [TCM_ALUA_STATE_TRANSITION] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
+ },
+ [TCM_ALUA_OFFLINE] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_OFFLINE,
+ },
};
/**
@@ -3374,11 +3394,8 @@ static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
cmd->scsi_status = SAM_STAT_BUSY;
return;
}
- } else if (sd->asc == 0) {
- WARN_ON_ONCE(cmd->scsi_asc == 0);
- asc = cmd->scsi_asc;
- ascq = cmd->scsi_ascq;
} else {
+ WARN_ON_ONCE(sd->asc == 0);
asc = sd->asc;
ascq = sd->ascq;
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index fbb6ffaddfbe..9f552f48084c 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -191,6 +191,7 @@ struct tcmu_cmd {
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_KEEP_BUF 1
unsigned long flags;
};
@@ -1315,11 +1316,13 @@ unlock:
mutex_unlock(&udev->cmdr_lock);
}
-static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
+static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
+ struct tcmu_cmd_entry *entry, bool keep_buf)
{
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
bool read_len_valid = false;
+ bool ret = true;
uint32_t read_len;
/*
@@ -1330,6 +1333,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
WARN_ON_ONCE(se_cmd);
goto out;
}
+ if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
+ entry->hdr.cmd_id);
+ set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+ ret = false;
+ goto out;
+ }
list_del_init(&cmd->queue_entry);
@@ -1379,8 +1389,22 @@ done:
target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
out:
- tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
- tcmu_free_cmd(cmd);
+ if (!keep_buf) {
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ } else {
+ /*
+ * Keep this command after completion, since userspace still
+ * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
+ * and clear any pending TCMU_CMD_BIT_EXPIRED, so we don't accept
+ * a second completion later.
+ * Userspace can free the buffer later by writing the cmd_id
+ * to the new free_kept_buf action attribute.
+ */
+ clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
+ }
+ return ret;
}
static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
@@ -1432,6 +1456,7 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
+ bool keep_buf;
/*
* Flush max. up to end of cmd ring since current entry might
@@ -1453,7 +1478,11 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
}
WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
- cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
+ keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
+ if (keep_buf)
+ cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
+ else
+ cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
if (!cmd) {
pr_err("cmd_id %u not found, ring is broken\n",
entry->hdr.cmd_id);
@@ -1461,7 +1490,8 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
return false;
}
- tcmu_handle_completion(cmd, entry);
+ if (!tcmu_handle_completion(cmd, entry, keep_buf))
+ break;
UPDATE_HEAD(udev->cmdr_last_cleaned,
tcmu_hdr_get_len(entry->hdr.len_op),
@@ -1619,7 +1649,8 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
+ test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
kmem_cache_free(tcmu_cmd_cache, cmd);
return 0;
}
@@ -1903,6 +1934,38 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
static int tcmu_release(struct uio_info *info, struct inode *inode)
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+ struct tcmu_cmd *cmd;
+ unsigned long i;
+ bool freed = false;
+
+ mutex_lock(&udev->cmdr_lock);
+
+ xa_for_each(&udev->commands, i, cmd) {
+	/* Cmds with KEEP_BUF set are no longer on the ring, but
+	 * userspace still holds the data buffer. If userspace closes,
+	 * we implicitly free these cmds and buffers, since after a new
+	 * open the (possibly new) userspace cannot find the cmd in the
+	 * ring and thus will never release the buffer by writing its
+	 * cmd_id to the free_kept_buf action attribute.
+	 */
+ if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
+ continue;
+ pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
+ cmd->cmd_id, udev->name);
+ freed = true;
+
+ xa_erase(&udev->commands, i);
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ }
+ /*
+	 * We only freed data space, not ring space. Therefore we don't call
+	 * run_tmr_queue, but call run_qfull_queue if tmr_queue is empty.
+ */
+ if (freed && list_empty(&udev->tmr_queue))
+ run_qfull_queue(udev, false);
+
+ mutex_unlock(&udev->cmdr_lock);
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
@@ -2147,7 +2210,8 @@ static int tcmu_configure_device(struct se_device *dev)
mb->version = TCMU_MAILBOX_VERSION;
mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
TCMU_MAILBOX_FLAG_CAP_READ_LEN |
- TCMU_MAILBOX_FLAG_CAP_TMR;
+ TCMU_MAILBOX_FLAG_CAP_TMR |
+ TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
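Userspace is expected to gate any use of KEEP_BUF on this new capability bit.
A minimal check against the mmap'ed mailbox (a sketch; 'mb' is assumed to
point at offset 0 of the UIO mapping):

	#include <stdbool.h>
	#include <linux/target_core_user.h>

	static bool keep_buf_supported(const struct tcmu_mailbox *mb)
	{
		return mb->flags & TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
	}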
@@ -2279,12 +2343,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
xa_for_each(&udev->commands, i, cmd) {
- pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
- cmd->cmd_id, udev->name,
- test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
+ pr_debug("removing cmd %u on dev %s from ring %s\n",
+ cmd->cmd_id, udev->name,
+ test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
+ "(is expired)" :
+ (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
+ "(is keep buffer)" : ""));
xa_erase(&udev->commands, i);
- if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
+ !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
cmd->se_cmd->priv = NULL;
@@ -2933,6 +3001,65 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);
+static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ struct tcmu_cmd *cmd;
+ u16 cmd_id;
+ int ret;
+
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou16(page, 0, &cmd_id);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&udev->cmdr_lock);
+
+ {
+ XA_STATE(xas, &udev->commands, cmd_id);
+
+ xas_lock(&xas);
+ cmd = xas_load(&xas);
+ if (!cmd) {
+ pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
+ count = -EINVAL;
+ xas_unlock(&xas);
+ goto out_unlock;
+ }
+ if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
+ cmd_id);
+ count = -EINVAL;
+ xas_unlock(&xas);
+ goto out_unlock;
+ }
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ /*
+	 * We only freed data space, not ring space. Therefore we don't call
+	 * run_tmr_queue, but call run_qfull_queue if tmr_queue is empty.
+ */
+ if (list_empty(&udev->tmr_queue))
+ run_qfull_queue(udev, false);
+
+out_unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return count;
+}
+CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
+
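From userspace the new action attribute is driven by writing the decimal
cmd_id. A small sketch (the configfs directory passed in is an assumption and
depends on how the device was created):

	#include <stdio.h>

	/* e.g. dir = "/sys/kernel/config/target/core/user_0/mydev/action" */
	static int free_kept_buf(const char *dir, unsigned int cmd_id)
	{
		char path[512];
		FILE *f;

		snprintf(path, sizeof(path), "%s/free_kept_buf", dir);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%u\n", cmd_id);
		return fclose(f);
	}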
static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_cmd_time_out,
&tcmu_attr_qfull_time_out,
@@ -2951,6 +3078,7 @@ static struct configfs_attribute **tcmu_attrs;
static struct configfs_attribute *tcmu_action_attrs[] = {
&tcmu_attr_block_dev,
&tcmu_attr_reset_ring,
+ &tcmu_attr_free_kept_buf,
NULL,
};
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 8e8588933628..15db7a3868fe 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -586,6 +586,7 @@ static int qe_ep_init(struct qe_udc *udc,
case USB_SPEED_FULL:
if (max <= 1023)
break;
+ fallthrough;
default:
goto en_done;
}
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index f4304ce69350..4c5a0a49035f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -551,7 +551,7 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
/* Did this command access the last sector? */
sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
(srb->cmnd[4] << 8) | (srb->cmnd[5]);
- disk = srb->request->rq_disk;
+ disk = scsi_cmd_to_rq(srb)->rq_disk;
if (!disk)
goto done;
sdkp = scsi_disk(disk);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 98f193078c05..1c855145711b 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -970,13 +970,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
fb_var_to_videomode(&mode2, &info->var);
/* make sure we don't delete the videomode of current var */
ret = fb_mode_is_equal(&mode1, &mode2);
-
- if (!ret)
- fbcon_mode_deleted(info, &mode1);
-
- if (!ret)
- fb_delete_videomode(&mode1, &info->modelist);
-
+ if (!ret) {
+ ret = fbcon_mode_deleted(info, &mode1);
+ if (!ret)
+ fb_delete_videomode(&mode1, &info->modelist);
+ }
return ret ? -EINVAL : 0;
}
diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index ffbf900648d9..438e2c78142f 100644
--- a/drivers/video/fbdev/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
@@ -241,6 +241,8 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
case FB_BLANK_POWERDOWN:
/* turn off panel */
xilinx_fb_out32(drvdata, REG_CTRL, 0);
+ break;
+
default:
break;
}
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 38b127b9edfc..9e7d9d0c763d 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1498,9 +1498,18 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
return;
- mutex_lock(&fs_info->reclaim_bgs_lock);
+ /*
+	 * Long-running balances can keep us blocked here for eternity, so
+ * simply skip reclaim if we're unable to get the mutex.
+ */
+ if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
+ btrfs_exclop_finish(fs_info);
+ return;
+ }
+
spin_lock(&fs_info->unused_bgs_lock);
while (!list_empty(&fs_info->reclaim_bgs)) {
+ u64 zone_unusable;
int ret = 0;
bg = list_first_entry(&fs_info->reclaim_bgs,
@@ -1534,13 +1543,22 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
goto next;
}
+ /*
+		 * Cache the zone_unusable value before turning the block group
+		 * read-only. As soon as the block group is read-only its
+		 * zone_unusable value gets moved to the block group's read-only
+ * bytes and isn't available for calculations anymore.
+ */
+ zone_unusable = bg->zone_unusable;
ret = inc_block_group_ro(bg, 0);
up_write(&space_info->groups_sem);
if (ret < 0)
goto next;
- btrfs_info(fs_info, "reclaiming chunk %llu with %llu%% used",
- bg->start, div_u64(bg->used * 100, bg->length));
+ btrfs_info(fs_info,
+ "reclaiming chunk %llu with %llu%% used %llu%% unusable",
+ bg->start, div_u64(bg->used * 100, bg->length),
+ div64_u64(zone_unusable * 100, bg->length));
trace_btrfs_reclaim_block_group(bg);
ret = btrfs_relocate_chunk(fs_info, bg->start);
if (ret)
@@ -2197,6 +2215,13 @@ error:
return ret;
}
+/*
+ * This function, insert_block_group_item(), belongs to phase 2 of chunk
+ * allocation.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
static int insert_block_group_item(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group)
{
@@ -2219,15 +2244,19 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans,
return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
}
+/*
+ * This function, btrfs_create_pending_block_groups(), belongs to phase 2 of
+ * chunk allocation.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *block_group;
int ret = 0;
- if (!trans->can_flush_pending_bgs)
- return;
-
while (!list_empty(&trans->new_bgs)) {
int index;
@@ -2242,6 +2271,13 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
ret = insert_block_group_item(trans, block_group);
if (ret)
btrfs_abort_transaction(trans, ret);
+ if (!block_group->chunk_item_inserted) {
+ mutex_lock(&fs_info->chunk_mutex);
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
+ mutex_unlock(&fs_info->chunk_mutex);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ }
ret = btrfs_finish_chunk_alloc(trans, block_group->start,
block_group->length);
if (ret)
@@ -2265,8 +2301,9 @@ next:
btrfs_trans_release_chunk_metadata(trans);
}
-int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
- u64 type, u64 chunk_offset, u64 size)
+struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
+ u64 bytes_used, u64 type,
+ u64 chunk_offset, u64 size)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *cache;
@@ -2276,7 +2313,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
if (!cache)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cache->length = size;
set_free_space_tree_thresholds(cache);
@@ -2290,7 +2327,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
ret = btrfs_load_block_group_zone_info(cache, true);
if (ret) {
btrfs_put_block_group(cache);
- return ret;
+ return ERR_PTR(ret);
}
ret = exclude_super_stripes(cache);
@@ -2298,7 +2335,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
/* We may have excluded something, so call this just in case */
btrfs_free_excluded_extents(cache);
btrfs_put_block_group(cache);
- return ret;
+ return ERR_PTR(ret);
}
add_new_free_space(cache, chunk_offset, chunk_offset + size);
@@ -2325,7 +2362,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
if (ret) {
btrfs_remove_free_space_cache(cache);
btrfs_put_block_group(cache);
- return ret;
+ return ERR_PTR(ret);
}
/*
@@ -2344,7 +2381,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
btrfs_update_delayed_refs_rsv(trans);
set_avail_alloc_bits(fs_info, type);
- return 0;
+ return cache;
}
/*
@@ -3222,11 +3259,203 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}
+static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+{
+ struct btrfs_block_group *bg;
+ int ret;
+
+ /*
+ * Check if we have enough space in the system space info because we
+ * will need to update device items in the chunk btree and insert a new
+ * chunk item in the chunk btree as well. This will allocate a new
+ * system block group if needed.
+ */
+ check_system_chunk(trans, flags);
+
+ bg = btrfs_alloc_chunk(trans, flags);
+ if (IS_ERR(bg)) {
+ ret = PTR_ERR(bg);
+ goto out;
+ }
+
+ /*
+ * If this is a system chunk allocation then stop right here and do not
+ * add the chunk item to the chunk btree. This is to prevent a deadlock
+ * because this system chunk allocation can be triggered while COWing
+ * some extent buffer of the chunk btree and while holding a lock on a
+ * parent extent buffer, in which case attempting to insert the chunk
+ * item (or update the device item) would result in a deadlock on that
+ * parent extent buffer. In this case defer the chunk btree updates to
+ * the second phase of chunk allocation and keep our reservation until
+ * the second phase completes.
+ *
+ * This is a rare case and can only be triggered by the very few cases
+ * we have where we need to touch the chunk btree outside chunk allocation
+ * and chunk removal. These cases are basically adding a device, removing
+ * a device or resizing a device.
+ */
+ if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ return 0;
+
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
+ /*
+ * Normally we are not expected to fail with -ENOSPC here, since we have
+ * previously reserved space in the system space_info and allocated one
+ * new system chunk if necessary. However there are two exceptions:
+ *
+ * 1) We may have enough free space in the system space_info but all the
+	 *    existing system block groups have a profile which cannot be used
+ * for extent allocation.
+ *
+ * This happens when mounting in degraded mode. For example we have a
+ * RAID1 filesystem with 2 devices, lose one device and mount the fs
+ * using the other device in degraded mode. If we then allocate a chunk,
+ * we may have enough free space in the existing system space_info, but
+ * none of the block groups can be used for extent allocation since they
+ * have a RAID1 profile, and because we are in degraded mode with a
+ * single device, we are forced to allocate a new system chunk with a
+ * SINGLE profile. Making check_system_chunk() iterate over all system
+ * block groups and check if they have a usable profile and enough space
+ * can be slow on very large filesystems, so we tolerate the -ENOSPC and
+ * try again after forcing allocation of a new system chunk. Like this
+ * we avoid paying the cost of that search in normal circumstances, when
+ * we were not mounted in degraded mode;
+ *
+	 * 2) We had enough free space in the system space_info, and one suitable
+	 *    block group to allocate from when we called check_system_chunk()
+	 *    above. However right after we called it, the only system block group
+	 *    with enough free space got turned into RO mode by a running scrub,
+	 *    and in this case we have to allocate a new one and retry. We only
+	 *    need to do this allocation and retry once, since we have a transaction
+	 *    handle and scrub uses the commit root to search for block groups.
+ */
+ if (ret == -ENOSPC) {
+ const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
+ struct btrfs_block_group *sys_bg;
+
+ sys_bg = btrfs_alloc_chunk(trans, sys_flags);
+ if (IS_ERR(sys_bg)) {
+ ret = PTR_ERR(sys_bg);
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+ } else if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+out:
+ btrfs_trans_release_chunk_metadata(trans);
+
+ return ret;
+}
+
/*
- * If force is CHUNK_ALLOC_FORCE:
+ * Chunk allocation is done in 2 phases:
+ *
+ * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
+ * the chunk, the chunk mapping, create its block group and add the items
+ * that belong in the chunk btree to it - more specifically, we need to
+ * update device items in the chunk btree and add a new chunk item to it.
+ *
+ * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
+ * group item to the extent btree and the device extent items to the devices
+ * btree.
+ *
+ * This is done to prevent deadlocks. For example when COWing a node from the
+ * extent btree we are holding a write lock on the node's parent and if we
+ * trigger chunk allocation and attempt to insert the new block group item
+ * in the extent btree right away, we could deadlock because the path for the
+ * insertion can include that parent node. At first glance it seems impossible
+ * to trigger chunk allocation after starting a transaction since tasks should
+ * reserve enough transaction units (metadata space), however while that is true
+ * most of the time, chunk allocation may still be triggered for several reasons:
+ *
+ * 1) When reserving metadata, we check if there is enough free space in the
+ * metadata space_info and therefore don't trigger allocation of a new chunk.
+ * However later when the task actually tries to COW an extent buffer from
+ * the extent btree or from the device btree for example, it is forced to
+ * allocate a new block group (chunk) because the only one that had enough
+ * free space was just turned to RO mode by a running scrub for example (or
+ *    device replace, block group reclaim thread, etc), so we cannot use it
+ * for allocating an extent and end up being forced to allocate a new one;
+ *
+ * 2) Because we only check that the metadata space_info has enough free bytes,
+ * we end up not allocating a new metadata chunk in that case. However if
+ * the filesystem was mounted in degraded mode, none of the existing block
+ * groups might be suitable for extent allocation due to their incompatible
+ *    profile (e.g. mounting a two-device filesystem, where all block groups
+ * use a RAID1 profile, in degraded mode using a single device). In this case
+ * when the task attempts to COW some extent buffer of the extent btree for
+ * example, it will trigger allocation of a new metadata block group with a
+ * suitable profile (SINGLE profile in the example of the degraded mount of
+ * the RAID1 filesystem);
+ *
+ * 3) The task has reserved enough transaction units / metadata space, but when
+ * it attempts to COW an extent buffer from the extent or device btree for
+ * example, it does not find any free extent in any metadata block group,
+ *    and is therefore forced to try to allocate a new metadata block group.
+ * This is because some other task allocated all available extents in the
+ *    meantime - this typically happens with tasks that don't reserve space
+ * properly, either intentionally or as a bug. One example where this is
+ * done intentionally is fsync, as it does not reserve any transaction units
+ * and ends up allocating a variable number of metadata extents for log
+ * tree extent buffers.
+ *
+ * We also need this two-phase setup when adding a device to a filesystem with
+ * a seed device - we must create new metadata and system chunks without adding
+ * any of the block group items to the chunk, extent and device btrees. If we
+ * did not do it this way, we would get ENOSPC when attempting to update those
+ * btrees, since all the chunks from the seed device are read-only.
+ *
+ * Phase 1 does the updates and insertions to the chunk btree because if we had
+ * it done in phase 2 and have a thundering herd of tasks allocating chunks in
+ * parallel, we risk having too many system chunks allocated if many tasks
+ * reach phase 1 without the previous ones completing phase 2. In the
+ * extreme case this leads to exhaustion of the system chunk array in the
+ * superblock. This is easier to trigger if using a btree node/leaf size of 64K
+ * and with RAID filesystems (so we have more device items in the chunk btree).
+ * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
+ * the system chunk array due to concurrent allocations") provides more details.
+ *
+ * For allocation of system chunks, we defer the updates and insertions into the
+ * chunk btree to phase 2. This is to prevent deadlocks on extent buffers because
+ * if the chunk allocation is triggered while COWing an extent buffer of the
+ * chunk btree, we are holding a lock on the parent of that extent buffer and
+ * doing the chunk btree updates and insertions can require locking that parent.
+ * This is for the very few and rare cases where we update the chunk btree that
+ * are not chunk allocation or chunk removal: adding a device, removing a device
+ * or resizing a device.
+ *
+ * The reservation of system space, done through check_system_chunk(), as well
+ * as all the updates and insertions into the chunk btree must be done while
+ * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
+ * an extent buffer from the chunks btree we never trigger allocation of a new
+ * system chunk, which would result in a deadlock (trying to lock twice an
+ * extent buffer of the chunk btree, first time before triggering the chunk
+ * allocation and the second time during chunk allocation while attempting to
+ * update the chunks btree). The system chunk array is also updated while holding
+ * that mutex. The same logic applies to removing chunks - we must reserve system
+ * space, update the chunk btree and the system chunk array in the superblock
+ * while holding fs_info->chunk_mutex.
+ *
+ * This function, btrfs_chunk_alloc(), belongs to phase 1.
+ *
+ * If @force is CHUNK_ALLOC_FORCE:
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
- * If force is NOT CHUNK_ALLOC_FORCE:
+ * If @force is NOT CHUNK_ALLOC_FORCE:
* - return 0 if it doesn't need to allocate a new chunk,
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
@@ -3243,6 +3472,13 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
/* Don't re-enter if we're already allocating a chunk */
if (trans->allocating_chunk)
return -ENOSPC;
+ /*
+ * If we are removing a chunk, don't re-enter or we would deadlock.
+ * System space reservation and system chunk allocation is done by the
+ * chunk remove operation (btrfs_remove_chunk()).
+ */
+ if (trans->removing_chunk)
+ return -ENOSPC;
space_info = btrfs_find_space_info(fs_info, flags);
ASSERT(space_info);
@@ -3306,13 +3542,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
force_metadata_allocation(fs_info);
}
- /*
- * Check if we have enough space in SYSTEM chunk because we may need
- * to update devices.
- */
- check_system_chunk(trans, flags);
-
- ret = btrfs_alloc_chunk(trans, flags);
+ ret = do_chunk_alloc(trans, flags);
trans->allocating_chunk = false;
spin_lock(&space_info->lock);
@@ -3331,22 +3561,6 @@ out:
space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
mutex_unlock(&fs_info->chunk_mutex);
- /*
- * When we allocate a new chunk we reserve space in the chunk block
- * reserve to make sure we can COW nodes/leafs in the chunk tree or
- * add new nodes/leafs to it if we end up needing to do it when
- * inserting the chunk item and updating device items as part of the
- * second phase of chunk allocation, performed by
- * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
- * large number of new block groups to create in our transaction
- * handle's new_bgs list to avoid exhausting the chunk block reserve
- * in extreme cases - like having a single transaction create many new
- * block groups when starting to write out the free space caches of all
- * the block groups that were made dirty during the lifetime of the
- * transaction.
- */
- if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
- btrfs_create_pending_block_groups(trans);
return ret;
}
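Condensed, the flow that the comment above describes is the following
illustrative outline (not the exact call graph):

	/* phase 1 */
	btrfs_chunk_alloc(trans, flags)
	  do_chunk_alloc(trans, flags)
	    check_system_chunk(trans, flags)             /* reserve system space */
	    bg = btrfs_alloc_chunk(trans, flags)         /* dev extents + mapping */
	    if (!(flags & BTRFS_BLOCK_GROUP_SYSTEM))
	      btrfs_chunk_alloc_add_chunk_item(trans, bg)  /* chunk btree */

	/* phase 2 (deferred, e.g. at transaction commit) */
	btrfs_create_pending_block_groups(trans)
	  insert_block_group_item(...)                   /* extent btree */
	  btrfs_chunk_alloc_add_chunk_item(...)          /* if still pending */
	  btrfs_finish_chunk_alloc(...)                  /* device extent items */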
@@ -3367,7 +3581,6 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
*/
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
- struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_space_info *info;
u64 left;
@@ -3382,7 +3595,6 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
lockdep_assert_held(&fs_info->chunk_mutex);
info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
-again:
spin_lock(&info->lock);
left = info->total_bytes - btrfs_space_info_used(info, true);
spin_unlock(&info->lock);
@@ -3401,76 +3613,39 @@ again:
if (left < thresh) {
u64 flags = btrfs_system_alloc_profile(fs_info);
- u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved);
-
- /*
- * If there's not available space for the chunk tree (system
- * space) and there are other tasks that reserved space for
- * creating a new system block group, wait for them to complete
- * the creation of their system block group and release excess
- * reserved space. We do this because:
- *
- * *) We can end up allocating more system chunks than necessary
- * when there are multiple tasks that are concurrently
- * allocating block groups, which can lead to exhaustion of
- * the system array in the superblock;
- *
- * *) If we allocate extra and unnecessary system block groups,
- * despite being empty for a long time, and possibly forever,
- * they end not being added to the list of unused block groups
- * because that typically happens only when deallocating the
- * last extent from a block group - which never happens since
- * we never allocate from them in the first place. The few
- * exceptions are when mounting a filesystem or running scrub,
- * which add unused block groups to the list of unused block
- * groups, to be deleted by the cleaner kthread.
- * And even when they are added to the list of unused block
- * groups, it can take a long time until they get deleted,
- * since the cleaner kthread might be sleeping or busy with
- * other work (deleting subvolumes, running delayed iputs,
- * defrag scheduling, etc);
- *
- * This is rare in practice, but can happen when too many tasks
- * are allocating blocks groups in parallel (via fallocate())
- * and before the one that reserved space for a new system block
- * group finishes the block group creation and releases the space
- * reserved in excess (at btrfs_create_pending_block_groups()),
- * other tasks end up here and see free system space temporarily
- * not enough for updating the chunk tree.
- *
- * We unlock the chunk mutex before waiting for such tasks and
- * lock it again after the wait, otherwise we would deadlock.
- * It is safe to do so because allocating a system chunk is the
- * first thing done while allocating a new block group.
- */
- if (reserved > trans->chunk_bytes_reserved) {
- const u64 min_needed = reserved - thresh;
-
- mutex_unlock(&fs_info->chunk_mutex);
- wait_event(cur_trans->chunk_reserve_wait,
- atomic64_read(&cur_trans->chunk_bytes_reserved) <=
- min_needed);
- mutex_lock(&fs_info->chunk_mutex);
- goto again;
- }
+ struct btrfs_block_group *bg;
/*
* Ignore failure to create system chunk. We might end up not
* needing it, as we might not need to COW all nodes/leafs from
* the paths we visit in the chunk tree (they were already COWed
* or created in the current transaction for example).
+ *
+ * Also, if our caller is allocating a system chunk, do not
+ * attempt to insert the chunk item in the chunk btree, as we
+ * could deadlock on an extent buffer since our caller may be
+ * COWing an extent buffer from the chunk btree.
*/
- ret = btrfs_alloc_chunk(trans, flags);
+ bg = btrfs_alloc_chunk(trans, flags);
+ if (IS_ERR(bg)) {
+ ret = PTR_ERR(bg);
+ } else if (!(type & BTRFS_BLOCK_GROUP_SYSTEM)) {
+ /*
+ * If we fail to add the chunk item here, we end up
+ * trying again at phase 2 of chunk allocation, at
+ * btrfs_create_pending_block_groups(). So ignore
+ * any error here.
+ */
+ btrfs_chunk_alloc_add_chunk_item(trans, bg);
+ }
}
if (!ret) {
ret = btrfs_block_rsv_add(fs_info->chunk_root,
&fs_info->chunk_block_rsv,
thresh, BTRFS_RESERVE_NO_FLUSH);
- if (!ret) {
- atomic64_add(thresh, &cur_trans->chunk_bytes_reserved);
+ if (!ret)
trans->chunk_bytes_reserved += thresh;
- }
}
}
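The deadlock that the comment above guards against can be pictured as this
illustrative chain (not actual code):

	COW a node of the chunk btree          (parent P is write-locked)
	  -> no usable system block group
	    -> check_system_chunk() -> btrfs_alloc_chunk()
	      -> insert the new chunk item into the chunk btree
	        -> the insertion path may need to write-lock P again
	          -> self-deadlock

Deferring the chunk item insertion to phase 2 breaks this cycle for
system-type allocations.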
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 7b927425dc71..c72a71efcb18 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -97,6 +97,7 @@ struct btrfs_block_group {
unsigned int removed:1;
unsigned int to_copy:1;
unsigned int relocating_repair:1;
+ unsigned int chunk_item_inserted:1;
int disk_cache_state;
@@ -268,8 +269,9 @@ void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
-int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
- u64 type, u64 chunk_offset, u64 size);
+struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
+ u64 bytes_used, u64 type,
+ u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
bool do_chunk_alloc);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 4bc3ca2cbd7d..c5c08c87e130 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -364,49 +364,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
return 0;
}
-static struct extent_buffer *alloc_tree_block_no_bg_flush(
- struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 parent_start,
- const struct btrfs_disk_key *disk_key,
- int level,
- u64 hint,
- u64 empty_size,
- enum btrfs_lock_nesting nest)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct extent_buffer *ret;
-
- /*
- * If we are COWing a node/leaf from the extent, chunk, device or free
- * space trees, make sure that we do not finish block group creation of
- * pending block groups. We do this to avoid a deadlock.
- * COWing can result in allocation of a new chunk, and flushing pending
- * block groups (btrfs_create_pending_block_groups()) can be triggered
- * when finishing allocation of a new chunk. Creation of a pending block
- * group modifies the extent, chunk, device and free space trees,
- * therefore we could deadlock with ourselves since we are holding a
- * lock on an extent buffer that btrfs_create_pending_block_groups() may
- * try to COW later.
- * For similar reasons, we also need to delay flushing pending block
- * groups when splitting a leaf or node, from one of those trees, since
- * we are holding a write lock on it and its parent or when inserting a
- * new root node for one of those trees.
- */
- if (root == fs_info->extent_root ||
- root == fs_info->chunk_root ||
- root == fs_info->dev_root ||
- root == fs_info->free_space_root)
- trans->can_flush_pending_bgs = false;
-
- ret = btrfs_alloc_tree_block(trans, root, parent_start,
- root->root_key.objectid, disk_key, level,
- hint, empty_size, nest);
- trans->can_flush_pending_bgs = true;
-
- return ret;
-}
-
/*
* does the dirty work in cow of a single block. The parent block (if
* supplied) is updated to point to the new cow copy. The new buffer is marked
@@ -455,8 +412,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
parent_start = parent->start;
- cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
- level, search_start, empty_size, nest);
+ cow = btrfs_alloc_tree_block(trans, root, parent_start,
+ root->root_key.objectid, &disk_key, level,
+ search_start, empty_size, nest);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -2458,9 +2416,9 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(lower, &lower_key, 0);
- c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
- root->node->start, 0,
- BTRFS_NESTING_NEW_ROOT);
+ c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ &lower_key, level, root->node->start, 0,
+ BTRFS_NESTING_NEW_ROOT);
if (IS_ERR(c))
return PTR_ERR(c);
@@ -2589,8 +2547,9 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
mid = (c_nritems + 1) / 2;
btrfs_node_key(c, &disk_key, mid);
- split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
- c->start, 0, BTRFS_NESTING_SPLIT);
+ split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ &disk_key, level, c->start, 0,
+ BTRFS_NESTING_SPLIT);
if (IS_ERR(split))
return PTR_ERR(split);
@@ -3381,10 +3340,10 @@ again:
* BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
* use BTRFS_NESTING_NEW_ROOT.
*/
- right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
- l->start, 0, num_doubles ?
- BTRFS_NESTING_NEW_ROOT :
- BTRFS_NESTING_SPLIT);
+ right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ &disk_key, 0, l->start, 0,
+ num_doubles ? BTRFS_NESTING_NEW_ROOT :
+ BTRFS_NESTING_SPLIT);
if (IS_ERR(right))
return PTR_ERR(right);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e6eb20987351..8f60314c36c5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2271,13 +2271,127 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
}
+/*
+ * Split an extent_map at [start, start + len)
+ *
+ * This function is intended to be used only for extract_ordered_extent().
+ */
+static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
+ u64 pre, u64 post)
+{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ struct extent_map *em;
+ struct extent_map *split_pre = NULL;
+ struct extent_map *split_mid = NULL;
+ struct extent_map *split_post = NULL;
+ int ret = 0;
+ int modified;
+ unsigned long flags;
+
+ /* Sanity check */
+ if (pre == 0 && post == 0)
+ return 0;
+
+ split_pre = alloc_extent_map();
+ if (pre)
+ split_mid = alloc_extent_map();
+ if (post)
+ split_post = alloc_extent_map();
+ if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ASSERT(pre + post < len);
+
+ lock_extent(&inode->io_tree, start, start + len - 1);
+ write_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, start, len);
+ if (!em) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ ASSERT(em->len == len);
+ ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
+ ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
+
+ flags = em->flags;
+ clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+ clear_bit(EXTENT_FLAG_LOGGING, &flags);
+ modified = !list_empty(&em->list);
+
+	/* First, replace the em with a new extent_map starting from em->start */
+ split_pre->start = em->start;
+ split_pre->len = (pre ? pre : em->len - post);
+ split_pre->orig_start = split_pre->start;
+ split_pre->block_start = em->block_start;
+ split_pre->block_len = split_pre->len;
+ split_pre->orig_block_len = split_pre->block_len;
+ split_pre->ram_bytes = split_pre->len;
+ split_pre->flags = flags;
+ split_pre->compress_type = em->compress_type;
+ split_pre->generation = em->generation;
+
+ replace_extent_mapping(em_tree, em, split_pre, modified);
+
+ /*
+ * Now we only have an extent_map at:
+ * [em->start, em->start + pre] if pre != 0
+ * [em->start, em->start + em->len - post] if pre == 0
+ */
+
+ if (pre) {
+ /* Insert the middle extent_map */
+ split_mid->start = em->start + pre;
+ split_mid->len = em->len - pre - post;
+ split_mid->orig_start = split_mid->start;
+ split_mid->block_start = em->block_start + pre;
+ split_mid->block_len = split_mid->len;
+ split_mid->orig_block_len = split_mid->block_len;
+ split_mid->ram_bytes = split_mid->len;
+ split_mid->flags = flags;
+ split_mid->compress_type = em->compress_type;
+ split_mid->generation = em->generation;
+ add_extent_mapping(em_tree, split_mid, modified);
+ }
+
+ if (post) {
+ split_post->start = em->start + em->len - post;
+ split_post->len = post;
+ split_post->orig_start = split_post->start;
+ split_post->block_start = em->block_start + em->len - post;
+ split_post->block_len = split_post->len;
+ split_post->orig_block_len = split_post->block_len;
+ split_post->ram_bytes = split_post->len;
+ split_post->flags = flags;
+ split_post->compress_type = em->compress_type;
+ split_post->generation = em->generation;
+ add_extent_mapping(em_tree, split_post, modified);
+ }
+
+ /* Once for us */
+ free_extent_map(em);
+ /* Once for the tree */
+ free_extent_map(em);
+
+out_unlock:
+ write_unlock(&em_tree->lock);
+ unlock_extent(&inode->io_tree, start, start + len - 1);
+out:
+ free_extent_map(split_pre);
+ free_extent_map(split_mid);
+ free_extent_map(split_post);
+
+ return ret;
+}
+
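A concrete, hypothetical example of the layout split_zoned_em() produces, for
an original extent_map of 128K with pre = 16K and post = 32K:

	original em:              [start,        start + 128K)
	split_pre  (pre != 0):    [start,        start + 16K)
	split_mid:                [start + 16K,  start + 96K)
	split_post:               [start + 96K,  start + 128K)

With pre == 0 no split_mid is created and split_pre instead covers
[start, start + 96K).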
static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
struct bio *bio, loff_t file_offset)
{
struct btrfs_ordered_extent *ordered;
- struct extent_map *em = NULL, *em_new = NULL;
- struct extent_map_tree *em_tree = &inode->extent_tree;
u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 file_len;
u64 len = bio->bi_iter.bi_size;
u64 end = start + len;
u64 ordered_end;
@@ -2317,41 +2431,16 @@ static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
goto out;
}
+ file_len = ordered->num_bytes;
pre = start - ordered->disk_bytenr;
post = ordered_end - end;
ret = btrfs_split_ordered_extent(ordered, pre, post);
if (ret)
goto out;
-
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, ordered->file_offset, len);
- if (!em) {
- read_unlock(&em_tree->lock);
- ret = -EIO;
- goto out;
- }
- read_unlock(&em_tree->lock);
-
- ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
- /*
- * We cannot reuse em_new here but have to create a new one, as
- * unpin_extent_cache() expects the start of the extent map to be the
- * logical offset of the file, which does not hold true anymore after
- * splitting.
- */
- em_new = create_io_em(inode, em->start + pre, len,
- em->start + pre, em->block_start + pre, len,
- len, len, BTRFS_COMPRESS_NONE,
- BTRFS_ORDERED_REGULAR);
- if (IS_ERR(em_new)) {
- ret = PTR_ERR(em_new);
- goto out;
- }
- free_extent_map(em_new);
+ ret = split_zoned_em(inode, file_offset, file_len, pre, post);
out:
- free_extent_map(em);
btrfs_put_ordered_extent(ordered);
return errno_to_blk_status(ret);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 50318231c1a8..14b9fdc8aaa9 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -254,23 +254,21 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
}
/*
- * To be called after all the new block groups attached to the transaction
- * handle have been created (btrfs_create_pending_block_groups()).
+ * To be called after doing the chunk btree updates: right after allocating a
+ * new chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when
+ * removing a chunk after all chunk btree updates, and after finishing the
+ * second phase of chunk allocation (btrfs_create_pending_block_groups()) in
+ * case some block group had its chunk item insertion delayed to the second
+ * phase.
*/
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_transaction *cur_trans = trans->transaction;
if (!trans->chunk_bytes_reserved)
return;
- WARN_ON_ONCE(!list_empty(&trans->new_bgs));
-
btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
trans->chunk_bytes_reserved, NULL);
- atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved);
- cond_wake_up(&cur_trans->chunk_reserve_wait);
trans->chunk_bytes_reserved = 0;
}
@@ -386,8 +384,6 @@ loop:
spin_lock_init(&cur_trans->dropped_roots_lock);
INIT_LIST_HEAD(&cur_trans->releasing_ebs);
spin_lock_init(&cur_trans->releasing_ebs_lock);
- atomic64_set(&cur_trans->chunk_bytes_reserved, 0);
- init_waitqueue_head(&cur_trans->chunk_reserve_wait);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
@@ -701,7 +697,6 @@ again:
h->fs_info = root->fs_info;
h->type = type;
- h->can_flush_pending_bgs = true;
INIT_LIST_HEAD(&h->new_bgs);
smp_mb();
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 07d76029f598..ba45065f9451 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -96,13 +96,6 @@ struct btrfs_transaction {
spinlock_t releasing_ebs_lock;
struct list_head releasing_ebs;
-
- /*
- * The number of bytes currently reserved, by all transaction handles
- * attached to this transaction, for metadata extents of the chunk tree.
- */
- atomic64_t chunk_bytes_reserved;
- wait_queue_head_t chunk_reserve_wait;
};
#define __TRANS_FREEZABLE (1U << 0)
@@ -139,7 +132,7 @@ struct btrfs_trans_handle {
short aborted;
bool adding_csums;
bool allocating_chunk;
- bool can_flush_pending_bgs;
+ bool removing_chunk;
bool reloc_reserved;
bool in_fsync;
struct btrfs_root *root;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index cab451d19547..dc6eb088d73e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3173,7 +3173,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (!log_root_tree->node) {
ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
if (ret) {
- mutex_unlock(&fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->tree_root->log_mutex);
goto out;
}
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 807502cd6510..1e4d43ffe38b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1745,19 +1745,14 @@ again:
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
} else {
- btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
goto out;
}
*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
ret = btrfs_del_item(trans, root, path);
- if (ret) {
- btrfs_handle_fs_error(fs_info, ret,
- "Failed to remove dev extent item");
- } else {
+ if (ret == 0)
set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
- }
out:
btrfs_free_path(path);
return ret;
@@ -2942,7 +2937,7 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
u32 cur;
struct btrfs_key key;
- mutex_lock(&fs_info->chunk_mutex);
+ lockdep_assert_held(&fs_info->chunk_mutex);
array_size = btrfs_super_sys_array_size(super_copy);
ptr = super_copy->sys_chunk_array;
@@ -2972,7 +2967,6 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
cur += len;
}
}
- mutex_unlock(&fs_info->chunk_mutex);
return ret;
}
@@ -3012,6 +3006,29 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
return em;
}
+static int remove_chunk_item(struct btrfs_trans_handle *trans,
+ struct map_lookup *map, u64 chunk_offset)
+{
+ int i;
+
+ /*
+ * Removing chunk items and updating the device items in the chunks btree
+ * requires holding the chunk_mutex.
+ * See the comment at btrfs_chunk_alloc() for the details.
+ */
+ lockdep_assert_held(&trans->fs_info->chunk_mutex);
+
+ for (i = 0; i < map->num_stripes; i++) {
+ int ret;
+
+ ret = btrfs_update_device(trans, map->stripes[i].dev);
+ if (ret)
+ return ret;
+ }
+
+ return btrfs_free_chunk(trans, chunk_offset);
+}
+
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -3032,14 +3049,16 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
return PTR_ERR(em);
}
map = em->map_lookup;
- mutex_lock(&fs_info->chunk_mutex);
- check_system_chunk(trans, map->type);
- mutex_unlock(&fs_info->chunk_mutex);
/*
- * Take the device list mutex to prevent races with the final phase of
- * a device replace operation that replaces the device object associated
- * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
+ * First delete the device extent items from the devices btree.
+ * We take the device_list_mutex to avoid racing with the finishing phase
+ * of a device replace operation. See the comment below before acquiring
+ * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
+ * because that can result in a deadlock when deleting the device extent
+ * items from the devices btree - COWing an extent buffer from the btree
+ * may result in allocating a new metadata chunk, which would attempt to
+	 * lock fs_info->chunk_mutex again.
*/
mutex_lock(&fs_devices->device_list_mutex);
for (i = 0; i < map->num_stripes; i++) {
@@ -3061,18 +3080,73 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
btrfs_clear_space_info_full(fs_info);
mutex_unlock(&fs_info->chunk_mutex);
}
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
- ret = btrfs_update_device(trans, device);
+ /*
+ * We acquire fs_info->chunk_mutex for 2 reasons:
+ *
+ * 1) Just like with the first phase of the chunk allocation, we must
+ * reserve system space, do all chunk btree updates and deletions, and
+ * update the system chunk array in the superblock while holding this
+ * mutex. This is for similar reasons as explained on the comment at
+	 *    mutex. This is for similar reasons as explained in the comment at
+ *
+ * 2) Prevent races with the final phase of a device replace operation
+ * that replaces the device object associated with the map's stripes,
+ * because the device object's id can change at any time during that
+ * final phase of the device replace operation
+ * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+ * replaced device and then see it with an ID of
+ * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
+	 *    the device item, which does not exist on the chunk btree.
+ * The finishing phase of device replace acquires both the
+ * device_list_mutex and the chunk_mutex, in that order, so we are
+ * safe by just acquiring the chunk_mutex.
+ */
+ trans->removing_chunk = true;
+ mutex_lock(&fs_info->chunk_mutex);
+
+ check_system_chunk(trans, map->type);
+
+ ret = remove_chunk_item(trans, map, chunk_offset);
+ /*
+ * Normally we should not get -ENOSPC since we reserved space before
+ * through the call to check_system_chunk().
+ *
+ * Despite our system space_info having enough free space, we may not
+ * be able to allocate extents from its block groups, because all have
+ * an incompatible profile, which will force us to allocate a new system
+ * block group with the right profile, or right after we called
+	 * check_system_chunk() above, a scrub turned the only system block group
+ * with enough free space into RO mode.
+ * This is explained with more detail at do_chunk_alloc().
+ *
+ * So if we get -ENOSPC, allocate a new system chunk and retry once.
+ */
+ if (ret == -ENOSPC) {
+ const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
+ struct btrfs_block_group *sys_bg;
+
+ sys_bg = btrfs_alloc_chunk(trans, sys_flags);
+ if (IS_ERR(sys_bg)) {
+ ret = PTR_ERR(sys_bg);
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
if (ret) {
- mutex_unlock(&fs_devices->device_list_mutex);
btrfs_abort_transaction(trans, ret);
goto out;
}
- }
- mutex_unlock(&fs_devices->device_list_mutex);
- ret = btrfs_free_chunk(trans, chunk_offset);
- if (ret) {
+ ret = remove_chunk_item(trans, map, chunk_offset);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+ } else if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3087,6 +3161,15 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
}
}
+ mutex_unlock(&fs_info->chunk_mutex);
+ trans->removing_chunk = false;
+
+ /*
+ * We are done with chunk btree updates and deletions, so release the
+ * system space we previously reserved (with check_system_chunk()).
+ */
+ btrfs_trans_release_chunk_metadata(trans);
+
ret = btrfs_remove_block_group(trans, chunk_offset, em);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -3094,6 +3177,10 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
}
out:
+ if (trans->removing_chunk) {
+ mutex_unlock(&fs_info->chunk_mutex);
+ trans->removing_chunk = false;
+ }
/* once for us */
free_extent_map(em);
return ret;
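The locking scheme btrfs_remove_chunk() now relies on can be summarized as
follows (illustrative; see the comments above):

	device replace, finishing phase:  device_list_mutex -> chunk_mutex (nested)
	chunk removal (this function):    device_list_mutex (device extent items),
	                                  released, then chunk_mutex (chunk btree
	                                  updates, device items, sys chunk array)

Because the replace-finish path holds both mutexes, holding either one is
enough to exclude it; taking them nested in the opposite order would deadlock.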
@@ -4860,13 +4947,12 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
u32 array_size;
u8 *ptr;
- mutex_lock(&fs_info->chunk_mutex);
+ lockdep_assert_held(&fs_info->chunk_mutex);
+
array_size = btrfs_super_sys_array_size(super_copy);
if (array_size + item_size + sizeof(disk_key)
- > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
- mutex_unlock(&fs_info->chunk_mutex);
+ > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
return -EFBIG;
- }
ptr = super_copy->sys_chunk_array + array_size;
btrfs_cpu_key_to_disk(&disk_key, key);
@@ -4875,7 +4961,6 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
memcpy(ptr, chunk, item_size);
item_size += sizeof(disk_key);
btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
- mutex_unlock(&fs_info->chunk_mutex);
return 0;
}
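With the lock/unlock pair replaced by a lockdep assertion, the sys chunk array
helpers now depend on the caller holding the mutex. A sketch of the
caller-side contract:

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	/* likewise for btrfs_del_sys_chunk(fs_info, chunk_offset) */
	mutex_unlock(&fs_info->chunk_mutex);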
@@ -5225,13 +5310,14 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
}
}
-static int create_chunk(struct btrfs_trans_handle *trans,
+static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
struct alloc_chunk_ctl *ctl,
struct btrfs_device_info *devices_info)
{
struct btrfs_fs_info *info = trans->fs_info;
struct map_lookup *map = NULL;
struct extent_map_tree *em_tree;
+ struct btrfs_block_group *block_group;
struct extent_map *em;
u64 start = ctl->start;
u64 type = ctl->type;
@@ -5241,7 +5327,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
if (!map)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
map->num_stripes = ctl->num_stripes;
for (i = 0; i < ctl->ndevs; ++i) {
@@ -5263,7 +5349,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
em = alloc_extent_map();
if (!em) {
kfree(map);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
em->map_lookup = map;
@@ -5279,12 +5365,12 @@ static int create_chunk(struct btrfs_trans_handle *trans,
if (ret) {
write_unlock(&em_tree->lock);
free_extent_map(em);
- return ret;
+ return ERR_PTR(ret);
}
write_unlock(&em_tree->lock);
- ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
- if (ret)
+ block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
+ if (IS_ERR(block_group))
goto error_del_extent;
for (i = 0; i < map->num_stripes; i++) {
@@ -5304,7 +5390,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
check_raid56_incompat_flag(info, type);
check_raid1c34_incompat_flag(info, type);
- return 0;
+ return block_group;
error_del_extent:
write_lock(&em_tree->lock);
@@ -5316,34 +5402,36 @@ error_del_extent:
/* One for the tree reference */
free_extent_map(em);
- return ret;
+ return block_group;
}
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
+struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ u64 type)
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_fs_devices *fs_devices = info->fs_devices;
struct btrfs_device_info *devices_info = NULL;
struct alloc_chunk_ctl ctl;
+ struct btrfs_block_group *block_group;
int ret;
lockdep_assert_held(&info->chunk_mutex);
if (!alloc_profile_is_valid(type, 0)) {
ASSERT(0);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if (list_empty(&fs_devices->alloc_list)) {
if (btrfs_test_opt(info, ENOSPC_DEBUG))
btrfs_debug(info, "%s: no writable device", __func__);
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
}
if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
btrfs_err(info, "invalid chunk type 0x%llx requested", type);
ASSERT(0);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
ctl.start = find_next_chunk(info);
@@ -5353,46 +5441,43 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
GFP_NOFS);
if (!devices_info)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
ret = gather_device_info(fs_devices, &ctl, devices_info);
- if (ret < 0)
+ if (ret < 0) {
+ block_group = ERR_PTR(ret);
goto out;
+ }
ret = decide_stripe_size(fs_devices, &ctl, devices_info);
- if (ret < 0)
+ if (ret < 0) {
+ block_group = ERR_PTR(ret);
goto out;
+ }
- ret = create_chunk(trans, &ctl, devices_info);
+ block_group = create_chunk(trans, &ctl, devices_info);
out:
kfree(devices_info);
- return ret;
+ return block_group;
}
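Since btrfs_alloc_chunk() now returns a pointer, callers move from the
integer-error convention to the ERR_PTR one. The canonical pattern (a sketch):

	struct btrfs_block_group *bg;

	bg = btrfs_alloc_chunk(trans, flags);
	if (IS_ERR(bg))
		return PTR_ERR(bg);	/* failure: no block group was created */
	/* success: use bg, e.g. btrfs_chunk_alloc_add_chunk_item(trans, bg) */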
/*
- * Chunk allocation falls into two parts. The first part does work
- * that makes the new allocated chunk usable, but does not do any operation
- * that modifies the chunk tree. The second part does the work that
- * requires modifying the chunk tree. This division is important for the
- * bootstrap process of adding storage to a seed btrfs.
+ * This function, btrfs_finish_chunk_alloc(), belongs to phase 2.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
*/
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
u64 chunk_offset, u64 chunk_size)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *extent_root = fs_info->extent_root;
- struct btrfs_root *chunk_root = fs_info->chunk_root;
- struct btrfs_key key;
struct btrfs_device *device;
- struct btrfs_chunk *chunk;
- struct btrfs_stripe *stripe;
struct extent_map *em;
struct map_lookup *map;
- size_t item_size;
u64 dev_offset;
u64 stripe_size;
- int i = 0;
+ int i;
int ret = 0;
em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
@@ -5400,53 +5485,117 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
return PTR_ERR(em);
map = em->map_lookup;
- item_size = btrfs_chunk_item_size(map->num_stripes);
stripe_size = em->orig_block_len;
- chunk = kzalloc(item_size, GFP_NOFS);
- if (!chunk) {
- ret = -ENOMEM;
- goto out;
- }
-
/*
* Take the device list mutex to prevent races with the final phase of
* a device replace operation that replaces the device object associated
* with the map's stripes, because the device object's id can change
* at any time during that final phase of the device replace operation
- * (dev-replace.c:btrfs_dev_replace_finishing()).
+ * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+ * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
+ * resulting in persisting a device extent item with such ID.
*/
mutex_lock(&fs_info->fs_devices->device_list_mutex);
for (i = 0; i < map->num_stripes; i++) {
device = map->stripes[i].dev;
dev_offset = map->stripes[i].physical;
- ret = btrfs_update_device(trans, device);
- if (ret)
- break;
ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
dev_offset, stripe_size);
if (ret)
break;
}
- if (ret) {
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+
+ free_extent_map(em);
+ return ret;
+}
+
+/*
+ * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
+ * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
+ * system chunks.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
+int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *bg)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_root *chunk_root = fs_info->chunk_root;
+ struct btrfs_key key;
+ struct btrfs_chunk *chunk;
+ struct btrfs_stripe *stripe;
+ struct extent_map *em;
+ struct map_lookup *map;
+ size_t item_size;
+ int i;
+ int ret;
+
+ /*
+ * We take the chunk_mutex for 2 reasons:
+ *
+ * 1) Updates and insertions in the chunk btree must be done while holding
+ * the chunk_mutex, as well as updating the system chunk array in the
+ * superblock. See the comment on top of btrfs_chunk_alloc() for the
+ * details;
+ *
+ * 2) To prevent races with the final phase of a device replace operation
+ * that replaces the device object associated with the map's stripes,
+ * because the device object's id can change at any time during that
+ * final phase of the device replace operation
+ * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+ * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
+ * which would cause a failure when updating the device item, which does
+ * not exists, or persisting a stripe of the chunk item with such ID.
+ * Here we can't use the device_list_mutex because our caller already
+ * has locked the chunk_mutex, and the final phase of device replace
+ * acquires both mutexes - first the device_list_mutex and then the
+ * chunk_mutex. Using either of those two mutexes protects us from a
+ * concurrent device replace.
+ */
+ lockdep_assert_held(&fs_info->chunk_mutex);
+
+ em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ map = em->map_lookup;
+ item_size = btrfs_chunk_item_size(map->num_stripes);
+
+ chunk = kzalloc(item_size, GFP_NOFS);
+ if (!chunk) {
+ ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
+ for (i = 0; i < map->num_stripes; i++) {
+ struct btrfs_device *device = map->stripes[i].dev;
+
+ ret = btrfs_update_device(trans, device);
+ if (ret)
+ goto out;
+ }
+
stripe = &chunk->stripe;
for (i = 0; i < map->num_stripes; i++) {
- device = map->stripes[i].dev;
- dev_offset = map->stripes[i].physical;
+ struct btrfs_device *device = map->stripes[i].dev;
+ const u64 dev_offset = map->stripes[i].physical;
btrfs_set_stack_stripe_devid(stripe, device->devid);
btrfs_set_stack_stripe_offset(stripe, dev_offset);
memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
stripe++;
}
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- btrfs_set_stack_chunk_length(chunk, chunk_size);
+ btrfs_set_stack_chunk_length(chunk, bg->length);
btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
btrfs_set_stack_chunk_type(chunk, map->type);
@@ -5458,15 +5607,18 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.type = BTRFS_CHUNK_ITEM_KEY;
- key.offset = chunk_offset;
+ key.offset = bg->start;
ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
- if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
- /*
- * TODO: Cleanup of inserted chunk root in case of
- * failure.
- */
+ if (ret)
+ goto out;
+
+ bg->chunk_item_inserted = 1;
+
+ if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
+ if (ret)
+ goto out;
}
out:
@@ -5479,16 +5631,41 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
u64 alloc_profile;
- int ret;
+ struct btrfs_block_group *meta_bg;
+ struct btrfs_block_group *sys_bg;
+
+ /*
+ * When adding a new device for sprouting, the seed device is read-only
+ * so we must allocate a metadata and a system chunk. But before
+ * adding the block group items to the extent, device and chunk btrees,
+ * we must first:
+ *
+ * 1) Create both chunks without doing any changes to the btrees, as
+ * otherwise we would get -ENOSPC since the block groups from the
+ * seed device are read-only;
+ *
+ * 2) Add the device item for the new sprout device - finishing the setup
+ * of a new block group requires updating the device item in the chunk
+ * btree, so it must exist when we attempt to do it. The previous step
+ * ensures this does not fail with -ENOSPC.
+ *
+ * After that we can add the block group items to their btrees:
+ * update the existing device item in the chunk btree, add a new block group
+ * item to the extent btree, add a new chunk item to the chunk btree and
+ * finally add the new device extent items to the devices btree.
+ */
alloc_profile = btrfs_metadata_alloc_profile(fs_info);
- ret = btrfs_alloc_chunk(trans, alloc_profile);
- if (ret)
- return ret;
+ meta_bg = btrfs_alloc_chunk(trans, alloc_profile);
+ if (IS_ERR(meta_bg))
+ return PTR_ERR(meta_bg);
alloc_profile = btrfs_system_alloc_profile(fs_info);
- ret = btrfs_alloc_chunk(trans, alloc_profile);
- return ret;
+ sys_bg = btrfs_alloc_chunk(trans, alloc_profile);
+ if (IS_ERR(sys_bg))
+ return PTR_ERR(sys_bg);
+
+ return 0;
}
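/*
 * Editor's note: a minimal, self-contained sketch (not from the patch) of the
 * ERR_PTR/IS_ERR/PTR_ERR idiom that btrfs_alloc_chunk() relies on above after
 * switching from an int return value to returning a struct btrfs_block_group
 * pointer. The helpers below mimic the kernel's versions in plain C; the
 * names demo_alloc() and ENOSPC_DEMO are hypothetical.
 */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ENOSPC_DEMO	28	/* stand-in for -ENOSPC */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errno values occupy the top MAX_ERRNO addresses */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int the_value = 42;

/* Return a valid pointer on success or an encoded errno on failure. */
static int *demo_alloc(int fail)
{
	if (fail)
		return ERR_PTR(-ENOSPC_DEMO);
	return &the_value;
}

int main(void)
{
	int *p = demo_alloc(1);

	if (IS_ERR(p))		/* error and pointer share one return channel */
		printf("error: %ld\n", PTR_ERR(p));
	else
		printf("value: %d\n", *p);
	return 0;
}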
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
@@ -7415,10 +7592,18 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
total_dev++;
} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
struct btrfs_chunk *chunk;
+
+ /*
+ * We are only called at mount time, so no need to take
+ * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
+ * we always take fs_info->chunk_mutex first, before
+ * acquiring any locks on the chunk tree. This is a
+ * requirement for chunk allocation, see the comment on
+ * top of btrfs_chunk_alloc() for details.
+ */
+ ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
- mutex_lock(&fs_info->chunk_mutex);
ret = read_one_chunk(&found_key, leaf, chunk);
- mutex_unlock(&fs_info->chunk_mutex);
if (ret)
goto error;
}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index c7fc7caf575c..55a8ba244716 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -450,7 +450,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
struct btrfs_io_geometry *io_geom);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
+struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num);
@@ -509,6 +510,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
u64 chunk_offset, u64 chunk_size);
+int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *bg);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
u64 logical, u64 length);
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 57f91311fdaa..007427ba75e5 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -176,7 +176,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
}
}
- rc = dns_resolve_server_name_to_ip(name, &srvIP);
+ rc = dns_resolve_server_name_to_ip(name, &srvIP, NULL);
if (rc < 0) {
cifs_dbg(FYI, "%s: Failed to resolve server part of %s to IP: %d\n",
__func__, name, rc);
@@ -211,6 +211,10 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
else
noff = tkn_e - (sb_mountdata + off) + 1;
+ if (strncasecmp(sb_mountdata + off, "cruid=", 6) == 0) {
+ off += noff;
+ continue;
+ }
if (strncasecmp(sb_mountdata + off, "unc=", 4) == 0) {
off += noff;
continue;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 3c2e117bb926..c0bfc2f01030 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -75,6 +75,9 @@
#define SMB_ECHO_INTERVAL_MAX 600
#define SMB_ECHO_INTERVAL_DEFAULT 60
+/* dns resolution interval in seconds */
+#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
+
/* maximum number of PDUs in one compound */
#define MAX_COMPOUND 5
@@ -646,6 +649,7 @@ struct TCP_Server_Info {
/* point to the SMBD connection if RDMA is used instead of socket */
struct smbd_connection *smbd_conn;
struct delayed_work echo; /* echo ping workqueue job */
+ struct delayed_work resolve; /* dns resolution workqueue job */
char *smallbuf; /* pointer to current "small" buffer */
char *bigbuf; /* pointer to current "big" buffer */
/* Total size of this PDU. Only valid from cifs_demultiplex_thread */
@@ -689,6 +693,9 @@ struct TCP_Server_Info {
bool use_swn_dstaddr;
struct sockaddr_storage swn_dstaddr;
#endif
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ bool is_dfs_conn; /* if a dfs connection */
+#endif
};
struct cifs_credits {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 01dc45178f66..1b04d6ec14dd 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -78,6 +78,8 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
int rc;
int len;
char *unc, *ipaddr = NULL;
+ time64_t expiry, now;
+ unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;
if (!server->hostname)
return -EINVAL;
@@ -91,13 +93,13 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
}
scnprintf(unc, len, "\\\\%s", server->hostname);
- rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+ rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry);
kfree(unc);
if (rc < 0) {
cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
__func__, server->hostname, rc);
- return rc;
+ goto requeue_resolve;
}
spin_lock(&cifs_tcp_ses_lock);
@@ -106,7 +108,45 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
spin_unlock(&cifs_tcp_ses_lock);
kfree(ipaddr);
- return !rc ? -1 : 0;
+ /* rc == 1 means success here */
+ if (rc) {
+ now = ktime_get_real_seconds();
+ if (expiry && expiry > now)
+ /*
+ * To make sure we don't use the cached entry, retry 1s
+ * after expiry.
+ */
+ ttl = (expiry - now + 1);
+ }
+ rc = !rc ? -1 : 0;
+
+requeue_resolve:
+ cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n",
+ __func__, ttl);
+ mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ));
+
+ return rc;
+}
+
+
+static void cifs_resolve_server(struct work_struct *work)
+{
+ int rc;
+ struct TCP_Server_Info *server = container_of(work,
+ struct TCP_Server_Info, resolve.work);
+
+ mutex_lock(&server->srv_mutex);
+
+ /*
+	 * Resolve the hostname again to make sure that the IP address is up-to-date.
+ */
+ rc = reconn_set_ipaddr_from_hostname(server);
+ if (rc) {
+ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+ __func__, rc);
+ }
+
+ mutex_unlock(&server->srv_mutex);
}
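/*
 * Editor's note: a small standalone sketch (not from the patch) of the TTL
 * arithmetic used above: default to SMB_DNS_RESOLVE_INTERVAL_DEFAULT, but if
 * the DNS record carries an expiry in the future, schedule the next
 * resolution one second after it expires so the cached entry is never reused.
 */
#include <stdio.h>
#include <time.h>

#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600

static unsigned long next_resolve_ttl(time_t expiry, time_t now)
{
	unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;

	if (expiry && expiry > now)
		ttl = (unsigned long)(expiry - now + 1); /* retry 1s after expiry */
	return ttl;
}

int main(void)
{
	time_t now = time(NULL);

	printf("no expiry       -> %lu s\n", next_resolve_ttl(0, now));
	printf("expires in 30 s -> %lu s\n", next_resolve_ttl(now + 30, now));
	return 0;
}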
#ifdef CONFIG_CIFS_DFS_UPCALL
@@ -680,6 +720,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
spin_unlock(&cifs_tcp_ses_lock);
cancel_delayed_work_sync(&server->echo);
+ cancel_delayed_work_sync(&server->resolve);
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
@@ -1227,6 +1268,16 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ /*
+ * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
+ * DFS connections to do failover properly, so avoid sharing them with regular
+ * shares or even links that may connect to the same server but have completely
+ * different failover targets.
+ */
+ if (server->is_dfs_conn)
+ continue;
+#endif
/*
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
@@ -1254,12 +1305,16 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
return;
}
+ /* srv_count can never go negative */
+ WARN_ON(server->srv_count < 0);
+
put_net(cifs_net_ns(server));
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
cancel_delayed_work_sync(&server->echo);
+ cancel_delayed_work_sync(&server->resolve);
if (from_reconnect)
/*
@@ -1342,6 +1397,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
+ INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
mutex_init(&tcp_ses->reconnect_mutex);
memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
@@ -1427,6 +1483,12 @@ smbd_connected:
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
+ /* queue dns resolution delayed work */
+ cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
+ __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
+
+ queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
+
return tcp_ses;
out_err_crypto_release:
@@ -1605,6 +1667,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
}
spin_unlock(&cifs_tcp_ses_lock);
+ /* ses_count can never go negative */
+ WARN_ON(ses->ses_count < 0);
+
spin_lock(&GlobalMid_Lock);
if (ses->status == CifsGood)
ses->status = CifsExiting;
@@ -1972,6 +2037,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
return;
}
+ /* tc_count can never go negative */
+ WARN_ON(tcon->tc_count < 0);
+
if (tcon->use_witness) {
int rc;
@@ -2910,6 +2978,23 @@ static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
}
#ifdef CONFIG_CIFS_DFS_UPCALL
+static int mount_get_dfs_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
+ unsigned int *xid, struct TCP_Server_Info **nserver,
+ struct cifs_ses **nses, struct cifs_tcon **ntcon)
+{
+ int rc;
+
+ ctx->nosharesock = true;
+ rc = mount_get_conns(ctx, cifs_sb, xid, nserver, nses, ntcon);
+ if (*nserver) {
+ cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
+ spin_lock(&cifs_tcp_ses_lock);
+ (*nserver)->is_dfs_conn = true;
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
+ return rc;
+}
+
/*
* cifs_build_path_to_root returns full path to root when we do not have an
* existing connection (tcon)
@@ -3105,7 +3190,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
tmp_ctx.prepath);
mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
- rc = mount_get_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
+ rc = mount_get_dfs_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
if (!rc || (*server && *ses)) {
/*
* We were able to connect to new target server. Update current context with
@@ -3404,7 +3489,12 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
goto error;
}
- ctx->nosharesock = true;
+ mount_put_conns(cifs_sb, xid, server, ses, tcon);
+ /*
+	 * Ignore the error here because we may fail over to other targets from a
+	 * cached referral.
+ */
+ (void)mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
/* Get path of DFS root */
ref_path = build_unc_path_to_root(ctx, cifs_sb, false);
@@ -3433,7 +3523,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
/* Connect to new DFS target only if we were redirected */
if (oldmnt != cifs_sb->ctx->mount_options) {
mount_put_conns(cifs_sb, xid, server, ses, tcon);
- rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
+ rc = mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
}
if (rc && !server && !ses) {
/* Failed to connect. Try to connect to other targets in the referral. */
@@ -3459,7 +3549,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
rc = -ELOOP;
} while (rc == -EREMOTE);
- if (rc || !tcon)
+ if (rc || !tcon || !ses)
goto error;
kfree(ref_path);
@@ -4095,7 +4185,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
if (!tree)
return -ENOMEM;
- if (!tcon->dfs_path) {
+	/* If it is not dfs or there was no cached dfs referral, then reconnect to the same share */
+ if (!tcon->dfs_path || dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl)) {
if (tcon->ipc) {
scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
@@ -4105,9 +4196,6 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
goto out;
}
- rc = dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl);
- if (rc)
- goto out;
isroot = ref.server_type == DFS_TYPE_ROOT;
free_dfs_info_param(&ref);
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index d15b82d569ef..8c616aaeb7c4 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -24,6 +24,7 @@
* dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
* @unc: UNC path specifying the server (with '/' as delimiter)
* @ip_addr: Where to return the IP address.
+ * @expiry: Where to return the expiry time for the dns record.
*
* The IP address will be returned in string form, and the caller is
* responsible for freeing it.
@@ -31,7 +32,7 @@
* Returns length of result on success, -ve on error.
*/
int
-dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
+dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry)
{
struct sockaddr_storage ss;
const char *hostname, *sep;
@@ -66,13 +67,14 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
/* Perform the upcall */
rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len,
- NULL, ip_addr, NULL, false);
+ NULL, ip_addr, expiry, false);
if (rc < 0)
cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
__func__, len, len, hostname);
else
- cifs_dbg(FYI, "%s: resolved: %*.*s to %s\n",
- __func__, len, len, hostname, *ip_addr);
+ cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n",
+ __func__, len, len, hostname, *ip_addr,
+ expiry ? (*expiry) : 0);
return rc;
name_is_IP_address:
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index 5be060b82b13..9fa2807ef79e 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -12,7 +12,7 @@
#define _DNS_RESOLVE_H
#ifdef __KERNEL__
-extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
+extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry);
#endif /* KERNEL */
#endif /* _DNS_RESOLVE_H */
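/*
 * Editor's note: a standalone sketch (not from the patch) of the optional
 * out-parameter pattern the new @expiry argument follows: callers that do not
 * care about the TTL (cifs_compose_mount_options(), match_target_ip()) pass
 * NULL, while reconn_set_ipaddr_from_hostname() passes &expiry to drive the
 * rescheduling above. resolve() is a hypothetical stand-in for
 * dns_resolve_server_name_to_ip().
 */
#include <stdio.h>
#include <time.h>

static int resolve(const char *name, const char **ip, time_t *expiry)
{
	(void)name;
	*ip = "192.0.2.1";		/* documentation address, RFC 5737 */
	if (expiry)			/* only fill it in when requested */
		*expiry = time(NULL) + 300;
	return 0;
}

int main(void)
{
	const char *ip;
	time_t expiry;

	resolve("\\\\server", &ip, NULL);	/* caller ignores the TTL */
	resolve("\\\\server", &ip, &expiry);	/* caller wants the TTL */
	printf("resolved to %s, expires at %ld\n", ip, (long)expiry);
	return 0;
}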
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 184138b4eb8c..844abeb2b48f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1187,7 +1187,7 @@ int match_target_ip(struct TCP_Server_Info *server,
cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
- rc = dns_resolve_server_name_to_ip(target, &tip);
+ rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
if (rc < 0)
goto out;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index e4c8f603dd58..ba3c58e1f725 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -557,8 +557,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
p = buf;
while (bytes_left >= sizeof(*p)) {
info->speed = le64_to_cpu(p->LinkSpeed);
- info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
- info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+ info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+ info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
@@ -2910,6 +2910,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
/* ipc tcons are not refcounted */
spin_lock(&cifs_tcp_ses_lock);
tcon->tc_count--;
+ /* tc_count can never go negative */
+ WARN_ON(tcon->tc_count < 0);
spin_unlock(&cifs_tcp_ses_lock);
}
kfree(utf16_path);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 4b27cb9105fd..e9cac7970b66 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -394,6 +394,7 @@ struct smb2_compression_capabilities_context {
__u16 Padding;
__u32 Flags;
__le16 CompressionAlgorithms[3];
+	__u16	Pad;  /* Some servers require padding so that DataLen is a multiple of 8 */
/* Check if pad needed */
} __packed;
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 2f63bf3a7325..5a0be9985bae 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -91,7 +91,10 @@ static ssize_t configfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
pr_debug("%s: count = %zd, pos = %lld, buf = %s\n",
__func__, iov_iter_count(to), iocb->ki_pos, buffer->page);
- retval = copy_to_iter(buffer->page, buffer->count, to);
+ if (iocb->ki_pos >= buffer->count)
+ goto out;
+ retval = copy_to_iter(buffer->page + iocb->ki_pos,
+ buffer->count - iocb->ki_pos, to);
iocb->ki_pos += retval;
if (retval == 0)
retval = -EFAULT;
@@ -162,7 +165,10 @@ static ssize_t configfs_bin_read_iter(struct kiocb *iocb, struct iov_iter *to)
buffer->needs_read_fill = 0;
}
- retval = copy_to_iter(buffer->bin_buffer, buffer->bin_buffer_size, to);
+ if (iocb->ki_pos >= buffer->bin_buffer_size)
+ goto out;
+ retval = copy_to_iter(buffer->bin_buffer + iocb->ki_pos,
+ buffer->bin_buffer_size - iocb->ki_pos, to);
iocb->ki_pos += retval;
if (retval == 0)
retval = -EFAULT;
@@ -171,21 +177,28 @@ out:
return retval;
}
-static int fill_write_buffer(struct configfs_buffer *buffer,
+/* Fill buffer->page starting at @pos with data coming from @from. */
+static int fill_write_buffer(struct configfs_buffer *buffer, loff_t pos,
struct iov_iter *from)
{
+ loff_t to_copy;
int copied;
+ u8 *to;
if (!buffer->page)
buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
if (!buffer->page)
return -ENOMEM;
- copied = copy_from_iter(buffer->page, SIMPLE_ATTR_SIZE - 1, from);
+ to_copy = SIMPLE_ATTR_SIZE - 1 - pos;
+ if (to_copy <= 0)
+ return 0;
+ to = buffer->page + pos;
+ copied = copy_from_iter(to, to_copy, from);
buffer->needs_read_fill = 1;
/* if buf is assumed to contain a string, terminate it by \0,
* so e.g. sscanf() can scan the string easily */
- buffer->page[copied] = 0;
+ to[copied] = 0;
return copied ? : -EFAULT;
}
@@ -217,7 +230,7 @@ static ssize_t configfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t len;
mutex_lock(&buffer->mutex);
- len = fill_write_buffer(buffer, from);
+ len = fill_write_buffer(buffer, iocb->ki_pos, from);
if (len > 0)
len = flush_write_buffer(file, buffer, len);
if (len > 0)
@@ -272,7 +285,9 @@ static ssize_t configfs_bin_write_iter(struct kiocb *iocb,
buffer->bin_buffer_size = end_offset;
}
- len = copy_from_iter(buffer->bin_buffer, buffer->bin_buffer_size, from);
+ len = copy_from_iter(buffer->bin_buffer + iocb->ki_pos,
+ buffer->bin_buffer_size - iocb->ki_pos, from);
+ iocb->ki_pos += len;
out:
mutex_unlock(&buffer->mutex);
return len ? : -EFAULT;
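/*
 * Editor's note: a standalone sketch (not from the patch) of the clamping the
 * fixed iterators above perform: a read at pos must copy only the bytes
 * between pos and the buffer's fill level, and a read at or past that level
 * copies nothing instead of re-copying the buffer from offset 0.
 */
#include <stdio.h>
#include <string.h>

static size_t read_at(const char *page, size_t count, size_t pos,
		      char *out, size_t len)
{
	size_t n;

	if (pos >= count)		/* nothing left past the fill level */
		return 0;
	n = count - pos;		/* clamp to what the buffer holds */
	if (n > len)
		n = len;
	memcpy(out, page + pos, n);
	return n;
}

int main(void)
{
	const char page[] = "option=1\n";
	char out[16];
	size_t n;

	n = read_at(page, sizeof(page) - 1, 7, out, sizeof(out));
	printf("copied %zu byte(s): %.*s", n, (int)n, out);	/* "1\n" */
	n = read_at(page, sizeof(page) - 1, 9, out, sizeof(out));
	printf("at EOF: %zu byte(s)\n", n);			/* 0 */
	return 0;
}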
diff --git a/fs/fcntl.c b/fs/fcntl.c
index dfc72f15be7f..f946bec8f1f1 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -369,8 +369,8 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
/* 32-bit arches must use fcntl64() */
case F_OFD_SETLK:
case F_OFD_SETLKW:
-#endif
fallthrough;
+#endif
case F_SETLK:
case F_SETLKW:
if (copy_from_user(&flock, argp, sizeof(flock)))
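/*
 * Editor's note: a minimal standalone illustration (not from the patch) of
 * why the fallthrough statement had to move inside the #if block above: it
 * must be compiled out together with the case labels it bridges, otherwise
 * the compiler is left with a fallthrough hint that precedes no case label.
 * The macro definition mirrors the kernel's; the command values are made up.
 */
#include <stdio.h>

#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough	__attribute__((__fallthrough__))
#else
#define fallthrough	do {} while (0)
#endif

#define HAVE_OFD_LOCKS 1

static const char *classify(int cmd)
{
	switch (cmd) {
#if HAVE_OFD_LOCKS
	case 3:
	case 4:
		fallthrough;	/* disappears along with its case labels */
#endif
	case 1:
	case 2:
		return "posix-style lock";
	default:
		return "other";
	}
}

int main(void)
{
	printf("%s\n", classify(3));
	return 0;
}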
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 2834d1afa6e8..de1985eae535 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -80,6 +80,35 @@ static int vfs_parse_sb_flag(struct fs_context *fc, const char *key)
}
/**
+ * vfs_parse_fs_param_source - Handle setting "source" via parameter
+ * @fc: The filesystem context to modify
+ * @param: The parameter
+ *
+ * This is a simple helper for filesystems to verify that the "source" they
+ * accept is sane.
+ *
+ * Returns 0 on success, -ENOPARAM if this is not the "source" parameter, and
+ * -EINVAL otherwise. In the event of failure, supplementary error information
+ * is logged.
+ */
+int vfs_parse_fs_param_source(struct fs_context *fc, struct fs_parameter *param)
+{
+ if (strcmp(param->key, "source") != 0)
+ return -ENOPARAM;
+
+ if (param->type != fs_value_is_string)
+ return invalf(fc, "Non-string source");
+
+ if (fc->source)
+ return invalf(fc, "Multiple sources");
+
+ fc->source = param->string;
+ param->string = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(vfs_parse_fs_param_source);
+
+/**
* vfs_parse_fs_param - Add a single parameter to a superblock config
* @fc: The filesystem context to modify
* @param: The parameter
@@ -122,15 +151,9 @@ int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param)
/* If the filesystem doesn't take any arguments, give it the
* default handling of source.
*/
- if (strcmp(param->key, "source") == 0) {
- if (param->type != fs_value_is_string)
- return invalf(fc, "VFS: Non-string source");
- if (fc->source)
- return invalf(fc, "VFS: Multiple sources");
- fc->source = param->string;
- param->string = NULL;
- return 0;
- }
+ ret = vfs_parse_fs_param_source(fc, param);
+ if (ret != -ENOPARAM)
+ return ret;
return invalf(fc, "%s: Unknown parameter '%s'",
fc->fs_type->name, param->key);
@@ -504,16 +527,11 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
struct legacy_fs_context *ctx = fc->fs_private;
unsigned int size = ctx->data_size;
size_t len = 0;
+ int ret;
- if (strcmp(param->key, "source") == 0) {
- if (param->type != fs_value_is_string)
- return invalf(fc, "VFS: Legacy: Non-string source");
- if (fc->source)
- return invalf(fc, "VFS: Legacy: Multiple sources");
- fc->source = param->string;
- param->string = NULL;
- return 0;
- }
+ ret = vfs_parse_fs_param_source(fc, param);
+ if (ret != -ENOPARAM)
+ return ret;
if (ctx->param_type == LEGACY_FS_MONOLITHIC_PARAMS)
return invalf(fc, "VFS: Legacy: Can't mix monolithic and individual options");
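/*
 * Editor's note: a hedged, kernel-context sketch (not from the patch) of how
 * a filesystem's own ->parse_param() can use the new helper. "myfs" and its
 * parameter table are hypothetical; the pattern is to try fs_parse() first
 * and, on -ENOPARAM, give "source" the default handling before rejecting the
 * key. This builds only in the kernel tree, not as a standalone program.
 */
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_ro_demo };

static const struct fs_parameter_spec myfs_fs_parameters[] = {
	fsparam_flag("ro_demo", Opt_ro_demo),
	{}
};

static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, myfs_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		/* Not one of ours: maybe it is "source". */
		opt = vfs_parse_fs_param_source(fc, param);
		if (opt != -ENOPARAM)
			return opt;
		return invalf(fc, "myfs: Unknown parameter '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_ro_demo:
		fc->sb_flags |= SB_RDONLY;
		break;
	}
	return 0;
}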
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 4af318fbda77..ef9498a6e88a 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
fd->key = ptr + tree->max_key_len + 2;
hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
tree->cnid, __builtin_return_address(0));
- mutex_lock(&tree->tree_lock);
+ switch (tree->cnid) {
+ case HFS_CAT_CNID:
+ mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+ break;
+ case HFS_EXT_CNID:
+ mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+ break;
+ case HFS_ATTR_CNID:
+ mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index b63a4df7327b..c0a73a6ffb28 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -15,16 +15,31 @@
#include "btree.h"
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
- int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
struct page *page;
+ int pagenum;
+ int bytes_read;
+ int bytes_to_read;
+ void *vaddr;
off += node->page_offset;
- page = node->page[0];
+ pagenum = off >> PAGE_SHIFT;
+ off &= ~PAGE_MASK; /* compute page offset for the first page */
- memcpy(buf, kmap(page) + off, len);
- kunmap(page);
+ for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+ if (pagenum >= node->tree->pages_per_bnode)
+ break;
+ page = node->page[pagenum];
+ bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+ vaddr = kmap_atomic(page);
+ memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+ kunmap_atomic(vaddr);
+
+ pagenum++;
+ off = 0; /* page offset only applies to the first page */
+ }
}
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
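/*
 * Editor's note: a standalone sketch (not from the patch) of the page walk
 * that hfs_bnode_read() now performs: a read of len bytes starting at off may
 * span several fixed-size pages, so copy page by page and reset the
 * intra-page offset to 0 after the first page. DEMO_PAGE_SIZE is shrunk so
 * the crossing is visible.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 8

static void node_read(char pages[][DEMO_PAGE_SIZE], int npages,
		      char *buf, int off, int len)
{
	int pagenum = off / DEMO_PAGE_SIZE;
	int done = 0;

	off %= DEMO_PAGE_SIZE;	/* offset applies to the first page only */
	while (done < len && pagenum < npages) {
		int chunk = len - done;

		if (chunk > DEMO_PAGE_SIZE - off)
			chunk = DEMO_PAGE_SIZE - off;
		memcpy(buf + done, pages[pagenum] + off, chunk);
		done += chunk;
		pagenum++;
		off = 0;
	}
}

int main(void)
{
	char pages[2][DEMO_PAGE_SIZE] = { "abcdefgh", "ijklmnop" };
	char buf[7] = { 0 };

	node_read(pages, 2, buf, 5, 6);	/* crosses from page 0 into page 1 */
	printf("%.6s\n", buf);		/* prints "fghijk" */
	return 0;
}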
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index 4ba45caf5939..0e6baee93245 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
#define NODE_HASH_SIZE 256
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+ CATALOG_BTREE_MUTEX,
+ EXTENTS_BTREE_MUTEX,
+ ATTR_BTREE_MUTEX,
+};
+
/* A HFS BTree held in memory */
struct hfs_btree {
struct super_block *sb;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 44d07c9e3a7f..12d9bae39363 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
if (!res) {
if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
res = -EIO;
- goto bail;
+ goto bail_hfs_find;
}
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
}
- if (res) {
- hfs_find_exit(&fd);
- goto bail_no_root;
- }
+ if (res)
+ goto bail_hfs_find;
res = -EINVAL;
root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
hfs_find_exit(&fd);
@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
/* everything's okay */
return 0;
+bail_hfs_find:
+ hfs_find_exit(&fd);
bail_no_root:
pr_err("get root inode failed\n");
bail:
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d94fb5835a20..0cac361bf6b8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2016,7 +2016,7 @@ static void io_req_task_submit(struct io_kiocb *req)
/* ctx stays valid until unlock, even if we drop all ours ctx->refs */
mutex_lock(&ctx->uring_lock);
- if (!(current->flags & PF_EXITING) && !current->in_execve)
+ if (!(req->task->flags & PF_EXITING) && !req->task->in_execve)
__io_queue_sqe(req);
else
io_req_complete_failed(req, -EFAULT);
@@ -6019,11 +6019,13 @@ static bool io_drain_req(struct io_kiocb *req)
ret = io_req_prep_async(req);
if (ret)
- return ret;
+ goto fail;
io_prep_async_link(req);
de = kmalloc(sizeof(*de), GFP_KERNEL);
if (!de) {
- io_req_complete_failed(req, -ENOMEM);
+ ret = -ENOMEM;
+fail:
+ io_req_complete_failed(req, ret);
return true;
}
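/*
 * Editor's note: a standalone sketch (not from the patch) of the error-path
 * shape io_drain_req() uses above: C permits a goto to target a label inside
 * a later compound statement, letting one completion path serve both the
 * prep failure and the allocation failure. The values below are made up.
 */
#include <stdio.h>

#define DEMO_ENOMEM 12

static int drain_demo(int prep_err, int alloc_fails)
{
	int ret = prep_err;

	if (ret)
		goto fail;
	if (alloc_fails) {
		ret = -DEMO_ENOMEM;
fail:
		printf("completing request with error %d\n", ret);
		return 1;	/* request was consumed */
	}
	printf("request queued for draining\n");
	return 1;
}

int main(void)
{
	drain_demo(-22, 0);	/* prep failed: jumps into the block */
	drain_demo(0, 1);	/* allocation failed: falls into the label */
	drain_demo(0, 0);	/* success path */
	return 0;
}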
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 41da4f14c00b..87ccb3438bec 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -215,6 +215,7 @@ iomap_read_inline_data(struct inode *inode, struct page *page,
if (PageUptodate(page))
return;
+ BUG_ON(page_has_private(page));
BUG_ON(page->index);
BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
@@ -239,7 +240,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
{
struct iomap_readpage_ctx *ctx = data;
struct page *page = ctx->cur_page;
- struct iomap_page *iop = iomap_page_create(inode, page);
+ struct iomap_page *iop;
bool same_page = false, is_contig = false;
loff_t orig_pos = pos;
unsigned poff, plen;
@@ -252,6 +253,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
/* zero post-eof blocks as the page may be mapped */
+ iop = iomap_page_create(inode, page);
iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
if (plen == 0)
goto done;
@@ -967,7 +969,6 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
block_commit_write(page, 0, length);
} else {
WARN_ON_ONCE(!PageUptodate(page));
- iomap_page_create(inode, page);
set_page_dirty(page);
}
@@ -1304,14 +1305,13 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct inode *inode,
struct page *page, u64 end_offset)
{
- struct iomap_page *iop = to_iomap_page(page);
+ struct iomap_page *iop = iomap_page_create(inode, page);
struct iomap_ioend *ioend, *next;
unsigned len = i_blocksize(inode);
u64 file_offset; /* file offset of page */
int error = 0, count = 0, i;
LIST_HEAD(submit_list);
- WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
/*
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index dab1b02eba5b..ce6fb810854f 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -35,23 +35,20 @@ loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
loff_t size = i_size_read(inode);
- loff_t length = size - offset;
loff_t ret;
/* Nothing to be found before or beyond the end of the file. */
if (offset < 0 || offset >= size)
return -ENXIO;
- while (length > 0) {
- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
- &offset, iomap_seek_hole_actor);
+ while (offset < size) {
+ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+ ops, &offset, iomap_seek_hole_actor);
if (ret < 0)
return ret;
if (ret == 0)
break;
-
offset += ret;
- length -= ret;
}
return offset;
@@ -83,27 +80,23 @@ loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
loff_t size = i_size_read(inode);
- loff_t length = size - offset;
loff_t ret;
/* Nothing to be found before or beyond the end of the file. */
if (offset < 0 || offset >= size)
return -ENXIO;
- while (length > 0) {
- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
- &offset, iomap_seek_data_actor);
+ while (offset < size) {
+ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+ ops, &offset, iomap_seek_data_actor);
if (ret < 0)
return ret;
if (ret == 0)
- break;
-
+ return offset;
offset += ret;
- length -= ret;
}
- if (length <= 0)
- return -ENXIO;
- return offset;
+ /* We've reached the end of the file without finding data */
+ return -ENXIO;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
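/*
 * Editor's note: a standalone userspace probe (not from the patch) of the
 * semantics the rewritten loops implement: lseek(SEEK_DATA)/lseek(SEEK_HOLE)
 * return the next data/hole at or after the offset, and fail with ENXIO when
 * nothing qualifies before end of file (the -ENXIO returns above).
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data, hole;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	data = lseek(fd, 0, SEEK_DATA);
	if (data < 0 && errno == ENXIO)
		printf("no data at or after offset 0 (fully sparse or empty)\n");
	else
		printf("first data at %lld\n", (long long)data);
	hole = lseek(fd, 0, SEEK_HOLE);	/* EOF counts as an implicit hole */
	if (hole < 0 && errno == ENXIO)
		printf("empty file: no implicit hole either\n");
	else
		printf("first hole at %lld\n", (long long)hole);
	close(fd);
	return 0;
}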
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index f229172652be..6e9ea4ee0f73 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -109,7 +109,7 @@ config NFSD_SCSILAYOUT
depends on NFSD_V4 && BLOCK
select NFSD_PNFS
select EXPORTFS_BLOCK_OPS
- select BLK_SCSI_REQUEST
+ select SCSI_COMMON
help
This option enables support for the exporting pNFS SCSI layouts
in the kernel's NFS server. The pNFS SCSI layout enables NFS
diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c
index eac6788fc6cf..c4769a9396c5 100644
--- a/fs/vboxsf/dir.c
+++ b/fs/vboxsf/dir.c
@@ -253,7 +253,7 @@ static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry,
}
static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
- umode_t mode, int is_dir)
+ umode_t mode, bool is_dir, bool excl, u64 *handle_ret)
{
struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
@@ -261,10 +261,12 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
int err;
params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW |
- SHFL_CF_ACT_FAIL_IF_EXISTS |
- SHFL_CF_ACCESS_READWRITE |
- (is_dir ? SHFL_CF_DIRECTORY : 0);
+ params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW | SHFL_CF_ACCESS_READWRITE;
+ if (is_dir)
+ params.create_flags |= SHFL_CF_DIRECTORY;
+ if (excl)
+ params.create_flags |= SHFL_CF_ACT_FAIL_IF_EXISTS;
+
params.info.attr.mode = (mode & 0777) |
(is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE);
params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING;
@@ -276,30 +278,81 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
if (params.result != SHFL_FILE_CREATED)
return -EPERM;
- vboxsf_close(sbi->root, params.handle);
-
err = vboxsf_dir_instantiate(parent, dentry, &params.info);
if (err)
- return err;
+ goto out;
/* parent directory access/change time changed */
sf_parent_i->force_restat = 1;
- return 0;
+out:
+ if (err == 0 && handle_ret)
+ *handle_ret = params.handle;
+ else
+ vboxsf_close(sbi->root, params.handle);
+
+ return err;
}
static int vboxsf_dir_mkfile(struct user_namespace *mnt_userns,
struct inode *parent, struct dentry *dentry,
umode_t mode, bool excl)
{
- return vboxsf_dir_create(parent, dentry, mode, 0);
+ return vboxsf_dir_create(parent, dentry, mode, false, excl, NULL);
}
static int vboxsf_dir_mkdir(struct user_namespace *mnt_userns,
struct inode *parent, struct dentry *dentry,
umode_t mode)
{
- return vboxsf_dir_create(parent, dentry, mode, 1);
+ return vboxsf_dir_create(parent, dentry, mode, true, true, NULL);
+}
+
+static int vboxsf_dir_atomic_open(struct inode *parent, struct dentry *dentry,
+ struct file *file, unsigned int flags, umode_t mode)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+ struct vboxsf_handle *sf_handle;
+ struct dentry *res = NULL;
+ u64 handle;
+ int err;
+
+ if (d_in_lookup(dentry)) {
+ res = vboxsf_dir_lookup(parent, dentry, 0);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ if (res)
+ dentry = res;
+ }
+
+ /* Only creates */
+ if (!(flags & O_CREAT) || d_really_is_positive(dentry))
+ return finish_no_open(file, res);
+
+ err = vboxsf_dir_create(parent, dentry, mode, false, flags & O_EXCL, &handle);
+ if (err)
+ goto out;
+
+ sf_handle = vboxsf_create_sf_handle(d_inode(dentry), handle, SHFL_CF_ACCESS_READWRITE);
+ if (IS_ERR(sf_handle)) {
+ vboxsf_close(sbi->root, handle);
+ err = PTR_ERR(sf_handle);
+ goto out;
+ }
+
+ err = finish_open(file, dentry, generic_file_open);
+ if (err) {
+ /* This also closes the handle passed to vboxsf_create_sf_handle() */
+ vboxsf_release_sf_handle(d_inode(dentry), sf_handle);
+ goto out;
+ }
+
+ file->private_data = sf_handle;
+ file->f_mode |= FMODE_CREATED;
+out:
+ dput(res);
+ return err;
}
static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
@@ -422,6 +475,7 @@ const struct inode_operations vboxsf_dir_iops = {
.lookup = vboxsf_dir_lookup,
.create = vboxsf_dir_mkfile,
.mkdir = vboxsf_dir_mkdir,
+ .atomic_open = vboxsf_dir_atomic_open,
.rmdir = vboxsf_dir_unlink,
.unlink = vboxsf_dir_unlink,
.rename = vboxsf_dir_rename,
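/*
 * Editor's note: a standalone userspace sketch (not from the patch) of what
 * ->atomic_open lets vboxsf collapse into one host round trip: lookup,
 * exclusive create and open. From userspace the semantics are unchanged;
 * O_EXCL maps to SHFL_CF_ACT_FAIL_IF_EXISTS on the host side. The path name
 * below is hypothetical.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "demo-file";
	int fd;

	/* Create-exclusive: fails with EEXIST if someone beat us to it. */
	fd = open(path, O_CREAT | O_EXCL | O_RDWR, 0644);
	if (fd < 0) {
		if (errno == EEXIST)
			fprintf(stderr, "%s already exists\n", path);
		else
			perror("open");
		return 1;
	}
	printf("created and opened %s atomically\n", path);
	close(fd);
	unlink(path);
	return 0;
}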
diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c
index c4ab5996d97a..864c2fad23be 100644
--- a/fs/vboxsf/file.c
+++ b/fs/vboxsf/file.c
@@ -20,17 +20,39 @@ struct vboxsf_handle {
struct list_head head;
};
-static int vboxsf_file_open(struct inode *inode, struct file *file)
+struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
+ u64 handle, u32 access_flags)
{
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct shfl_createparms params = {};
struct vboxsf_handle *sf_handle;
- u32 access_flags = 0;
- int err;
sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
if (!sf_handle)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+	/* the host may have given us different attr than requested */
+ sf_i->force_restat = 1;
+
+ /* init our handle struct and add it to the inode's handles list */
+ sf_handle->handle = handle;
+ sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
+ sf_handle->access_flags = access_flags;
+ kref_init(&sf_handle->refcount);
+
+ mutex_lock(&sf_i->handle_list_mutex);
+ list_add(&sf_handle->head, &sf_i->handle_list);
+ mutex_unlock(&sf_i->handle_list_mutex);
+
+ return sf_handle;
+}
+
+static int vboxsf_file_open(struct inode *inode, struct file *file)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
+ struct shfl_createparms params = {};
+ struct vboxsf_handle *sf_handle;
+ u32 access_flags = 0;
+ int err;
/*
* We check the value of params.handle afterwards to find out if
@@ -83,23 +105,14 @@ static int vboxsf_file_open(struct inode *inode, struct file *file)
err = vboxsf_create_at_dentry(file_dentry(file), &params);
if (err == 0 && params.handle == SHFL_HANDLE_NIL)
err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
- if (err) {
- kfree(sf_handle);
+ if (err)
return err;
- }
-
- /* the host may have given us different attr then requested */
- sf_i->force_restat = 1;
- /* init our handle struct and add it to the inode's handles list */
- sf_handle->handle = params.handle;
- sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
- sf_handle->access_flags = access_flags;
- kref_init(&sf_handle->refcount);
-
- mutex_lock(&sf_i->handle_list_mutex);
- list_add(&sf_handle->head, &sf_i->handle_list);
- mutex_unlock(&sf_i->handle_list_mutex);
+ sf_handle = vboxsf_create_sf_handle(inode, params.handle, access_flags);
+ if (IS_ERR(sf_handle)) {
+ vboxsf_close(sbi->root, params.handle);
+ return PTR_ERR(sf_handle);
+ }
file->private_data = sf_handle;
return 0;
@@ -114,22 +127,26 @@ static void vboxsf_handle_release(struct kref *refcount)
kfree(sf_handle);
}
-static int vboxsf_file_release(struct inode *inode, struct file *file)
+void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle)
{
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct vboxsf_handle *sf_handle = file->private_data;
+ mutex_lock(&sf_i->handle_list_mutex);
+ list_del(&sf_handle->head);
+ mutex_unlock(&sf_i->handle_list_mutex);
+
+ kref_put(&sf_handle->refcount, vboxsf_handle_release);
+}
+
+static int vboxsf_file_release(struct inode *inode, struct file *file)
+{
/*
* When a file is closed on our (the guest) side, we want any subsequent
* accesses done on the host side to see all changes done from our side.
*/
filemap_write_and_wait(inode->i_mapping);
- mutex_lock(&sf_i->handle_list_mutex);
- list_del(&sf_handle->head);
- mutex_unlock(&sf_i->handle_list_mutex);
-
- kref_put(&sf_handle->refcount, vboxsf_handle_release);
+ vboxsf_release_sf_handle(inode, file->private_data);
return 0;
}
diff --git a/fs/vboxsf/vfsmod.h b/fs/vboxsf/vfsmod.h
index 6a7a9cedebc6..9047befa66c5 100644
--- a/fs/vboxsf/vfsmod.h
+++ b/fs/vboxsf/vfsmod.h
@@ -18,6 +18,8 @@
#define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
#define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode)
+struct vboxsf_handle;
+
struct vboxsf_options {
unsigned long ttl;
kuid_t uid;
@@ -80,6 +82,11 @@ extern const struct file_operations vboxsf_reg_fops;
extern const struct address_space_operations vboxsf_reg_aops;
extern const struct dentry_operations vboxsf_dentry_ops;
+/* from file.c */
+struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
+ u64 handle, u32 access_flags);
+void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle);
+
/* from utils.c */
struct inode *vboxsf_new_inode(struct super_block *sb);
int vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 778ec52cce70..ee9ec0c50bec 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -804,6 +804,14 @@ xfs_ag_shrink_space(
args.fsbno = XFS_AGB_TO_FSB(mp, agno, aglen - delta);
/*
+ * Make sure that the last inode cluster cannot overlap with the new
+ * end of the AG, even if it's sparse.
+ */
+ error = xfs_ialloc_check_shrink(*tpp, agno, agibp, aglen - delta);
+ if (error)
+ return error;
+
+ /*
* Disable perag reservations so it doesn't cause the allocation request
* to fail. We'll reestablish reservation before we return.
*/
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index d9d7d5137b73..191d51725988 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -483,7 +483,7 @@ xfs_attr_set_iter(
if (error)
return error;
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RM_LBLK:
/* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
dac->dela_state = XFS_DAS_RM_LBLK;
@@ -496,7 +496,7 @@ xfs_attr_set_iter(
return -EAGAIN;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RD_LEAF:
/*
* This is the last step for leaf format. Read the block with
@@ -528,7 +528,7 @@ xfs_attr_set_iter(
return error;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_ALLOC_NODE:
/*
* If there was an out-of-line value, allocate the blocks we
@@ -590,7 +590,7 @@ xfs_attr_set_iter(
if (error)
return error;
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RM_NBLK:
/* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
dac->dela_state = XFS_DAS_RM_NBLK;
@@ -603,7 +603,7 @@ xfs_attr_set_iter(
return -EAGAIN;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_CLR_FLAG:
/*
* The last state for node format. Look up the old attr and
@@ -1406,7 +1406,7 @@ xfs_attr_remove_iter(
state = dac->da_state;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RMTBLK:
dac->dela_state = XFS_DAS_RMTBLK;
@@ -1441,7 +1441,7 @@ xfs_attr_remove_iter(
return -EAGAIN;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RM_NAME:
/*
* If we came here fresh from a transaction roll, reattach all
@@ -1469,7 +1469,7 @@ xfs_attr_remove_iter(
return -EAGAIN;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RM_SHRINK:
/*
* If the result is small enough, push it all into the inode.
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 57d9cb632983..aaf8805a82df 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -2928,3 +2928,58 @@ xfs_ialloc_calc_rootino(
return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
}
+
+/*
+ * Ensure there are no sparse inode clusters that cross the new EOAG.
+ *
+ * This is a no-op for non-sparse-inode filesystems, since clusters are always
+ * fully allocated there and checking the bnobt suffices. However, a sparse
+ * inode filesystem could have a record where the upper inodes are free blocks.
+ * If those blocks were removed from the filesystem, the inode record would
+ * extend beyond EOAG, which would be flagged as corruption.
+ */
+int
+xfs_ialloc_check_shrink(
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ struct xfs_buf *agibp,
+ xfs_agblock_t new_length)
+{
+ struct xfs_inobt_rec_incore rec;
+ struct xfs_btree_cur *cur;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
+ xfs_agino_t agino = XFS_AGB_TO_AGINO(mp, new_length);
+ int has;
+ int error;
+
+ if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
+ return 0;
+
+ pag = xfs_perag_get(mp, agno);
+ cur = xfs_inobt_init_cursor(mp, tp, agibp, pag, XFS_BTNUM_INO);
+
+ /* Look up the inobt record that would correspond to the new EOFS. */
+ error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
+ if (error || !has)
+ goto out;
+
+ error = xfs_inobt_get_rec(cur, &rec, &has);
+ if (error)
+ goto out;
+
+ if (!has) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ /* If the record covers inodes that would be beyond EOFS, bail out. */
+ if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) {
+ error = -ENOSPC;
+ goto out;
+ }
+out:
+ xfs_btree_del_cursor(cur, error);
+ xfs_perag_put(pag);
+ return error;
+}
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 9df7c80408ff..9a2112b4ad5e 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -122,4 +122,7 @@ int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
void xfs_ialloc_setup_geometry(struct xfs_mount *mp);
xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit);
+int xfs_ialloc_check_shrink(struct xfs_trans *tp, xfs_agnumber_t agno,
+ struct xfs_buf *agibp, xfs_agblock_t new_length);
+
#endif /* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 04ce361688f7..84ea2e0af9f0 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -592,23 +592,27 @@ xfs_inode_validate_extsize(
/*
* This comment describes a historic gap in this verifier function.
*
- * On older kernels, the extent size hint verifier doesn't check that
- * the extent size hint is an integer multiple of the realtime extent
- * size on a directory with both RTINHERIT and EXTSZINHERIT flags set.
- * The verifier has always enforced the alignment rule for regular
- * files with the REALTIME flag set.
+ * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this
+ * function has never checked that the extent size hint is an integer
+ * multiple of the realtime extent size. Since we allow users to set
+ * this combination on non-rt filesystems /and/ to change the rt
+ * extent size when adding a rt device to a filesystem, the net effect
+ * is that users can configure a filesystem anticipating one rt
+ * geometry and change their minds later. Directories do not use the
+ * extent size hint, so this is harmless for them.
*
* If a directory with a misaligned extent size hint is allowed to
* propagate that hint into a new regular realtime file, the result
* is that the inode cluster buffer verifier will trigger a corruption
- * shutdown the next time it is run.
+ * shutdown the next time it is run, because the verifier has always
+ * enforced the alignment rule for regular files.
*
- * Unfortunately, there could be filesystems with these misconfigured
- * directories in the wild, so we cannot add a check to this verifier
- * at this time because that will result a new source of directory
- * corruption errors when reading an existing filesystem. Instead, we
- * permit the misconfiguration to pass through the verifiers so that
- * callers of this function can correct and mitigate externally.
+ * Because we allow administrators to set a new rt extent size when
+ * adding a rt section, we cannot add a check to this verifier because
+	 * that will result in a new source of directory corruption errors when
+ * reading an existing filesystem. Instead, we rely on callers to
+ * decide when alignment checks are appropriate, and fix things up as
+ * needed.
*/
if (rt_flag)
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 8d595a5c4abd..16f723ebe8dd 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -143,16 +143,14 @@ xfs_trans_log_inode(
}
/*
- * Inode verifiers on older kernels don't check that the extent size
- * hint is an integer multiple of the rt extent size on a directory
- * with both rtinherit and extszinherit flags set. If we're logging a
- * directory that is misconfigured in this way, clear the hint.
+ * Inode verifiers do not check that the extent size hint is an integer
+ * multiple of the rt extent size on a directory with both rtinherit
+ * and extszinherit flags set. If we're logging a directory that is
+ * misconfigured in this way, clear the hint.
*/
if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
(ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
- xfs_info_once(ip->i_mount,
- "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino);
ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
XFS_DIFLAG_EXTSZINHERIT);
ip->i_extsize = 0;
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 61f90b2c9430..76fbc7ca4cec 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -73,11 +73,25 @@ xchk_inode_extsize(
uint16_t flags)
{
xfs_failaddr_t fa;
+ uint32_t value = be32_to_cpu(dip->di_extsize);
- fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize),
- mode, flags);
+ fa = xfs_inode_validate_extsize(sc->mp, value, mode, flags);
if (fa)
xchk_ino_set_corrupt(sc, ino);
+
+ /*
+ * XFS allows a sysadmin to change the rt extent size when adding a rt
+ * section to a filesystem after formatting. If there are any
+ * directories with extszinherit and rtinherit set, the hint could
+ * become misaligned with the new rextsize. The verifier doesn't check
+ * this, because we allow rtinherit directories even without an rt
+ * device. Flag this as an administrative warning since we will clean
+ * this up eventually.
+ */
+ if ((flags & XFS_DIFLAG_RTINHERIT) &&
+ (flags & XFS_DIFLAG_EXTSZINHERIT) &&
+ value % sc->mp->m_sb.sb_rextsize > 0)
+ xchk_ino_set_warning(sc, ino);
}
/*
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index a835ceb79ba5..990b72ae3635 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2763,6 +2763,19 @@ xfs_remove(
error = xfs_droplink(tp, ip);
if (error)
goto out_trans_cancel;
+
+ /*
+ * Point the unlinked child directory's ".." entry to the root
+ * directory to eliminate back-references to inodes that may
+ * get freed before the child directory is closed. If the fs
+ * gets shrunk, this can lead to dirent inode validation errors.
+ */
+ if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
+ error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
+ tp->t_mountp->m_sb.sb_rootino, 0);
+ if (error)
+ return error;
+ }
} else {
/*
* When removing a non-directory we need to log the parent
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 65270e63c032..16039ea10ac9 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1065,7 +1065,24 @@ xfs_fill_fsxattr(
fileattr_fill_xflags(fa, xfs_ip2xflags(ip));
- fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+ if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
+ fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+ } else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
+ /*
+ * Don't let a misaligned extent size hint on a directory
+ * escape to userspace if it won't pass the setattr checks
+ * later.
+ */
+ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+ ip->i_extsize % mp->m_sb.sb_rextsize > 0) {
+ fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
+ FS_XFLAG_EXTSZINHERIT);
+ fa->fsx_extsize = 0;
+ } else {
+ fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+ }
+ }
+
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
fa->fsx_projid = ip->i_projid;
@@ -1292,10 +1309,10 @@ xfs_ioctl_setattr_check_extsize(
new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
/*
- * Inode verifiers on older kernels don't check that the extent size
- * hint is an integer multiple of the rt extent size on a directory
- * with both rtinherit and extszinherit flags set. Don't let sysadmins
- * misconfigure directories.
+ * Inode verifiers do not check that the extent size hint is an integer
+ * multiple of the rt extent size on a directory with both rtinherit
+ * and extszinherit flags set. Don't let sysadmins misconfigure
+ * directories.
*/
if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
(new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 4e7be6b4ca8e..699066fb9052 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -923,16 +923,41 @@ xfs_growfs_rt(
uint8_t *rsum_cache; /* old summary cache */
sbp = &mp->m_sb;
- /*
- * Initial error checking.
- */
+
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL ||
- (nrblocks = in->newblocks) <= sbp->sb_rblocks ||
- (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
+
+ /* Needs to have been mounted with an rt device. */
+ if (!XFS_IS_REALTIME_MOUNT(mp))
+ return -EINVAL;
+ /*
+ * Mount should fail if the rt bitmap/summary files don't load, but
+ * we'll check anyway.
+ */
+ if (!mp->m_rbmip || !mp->m_rsumip)
+ return -EINVAL;
+
+ /* Shrink not supported. */
+ if (in->newblocks <= sbp->sb_rblocks)
+ return -EINVAL;
+
+ /* Can only change rt extent size when adding rt volume. */
+ if (sbp->sb_rblocks > 0 && in->extsize != sbp->sb_rextsize)
+ return -EINVAL;
+
+ /* Range check the extent size. */
+ if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE ||
+ XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
return -EINVAL;
- if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks)))
+
+ /* Unsupported realtime features. */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb) ||
+ xfs_sb_version_hasreflink(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ nrblocks = in->newblocks;
+ error = xfs_sb_validate_fsb_count(sbp, nrblocks);
+ if (error)
return error;
/*
* Read in the last block of the device, make sure it exists.
@@ -996,7 +1021,8 @@ xfs_growfs_rt(
((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
bmbno < nrbmblocks;
bmbno++) {
- xfs_trans_t *tp;
+ struct xfs_trans *tp;
+ xfs_rfsblock_t nrblocks_step;
*nmp = *mp;
nsbp = &nmp->m_sb;
@@ -1005,10 +1031,9 @@ xfs_growfs_rt(
*/
nsbp->sb_rextsize = in->extsize;
nsbp->sb_rbmblocks = bmbno + 1;
- nsbp->sb_rblocks =
- XFS_RTMIN(nrblocks,
- nsbp->sb_rbmblocks * NBBY *
- nsbp->sb_blocksize * nsbp->sb_rextsize);
+ nrblocks_step = (bmbno + 1) * NBBY * nsbp->sb_blocksize *
+ nsbp->sb_rextsize;
+ nsbp->sb_rblocks = min(nrblocks, nrblocks_step);
nsbp->sb_rextents = nsbp->sb_rblocks;
do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
ASSERT(nsbp->sb_rextents != 0);
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index dbf03635869c..70055d486bf7 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -705,9 +705,6 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
return 0;
bio = bio_alloc(GFP_NOFS, nr_pages);
- if (!bio)
- return -ENOMEM;
-
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = zi->i_zsector;
bio->bi_write_hint = iocb->ki_hint;
diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h
index 1d8986563fc5..0728ad07ff7a 100644
--- a/include/dt-bindings/clock/r9a07g044-cpg.h
+++ b/include/dt-bindings/clock/r9a07g044-cpg.h
@@ -32,58 +32,188 @@
#define R9A07G044_OSCCLK 21
/* R9A07G044 Module Clocks */
-#define R9A07G044_CLK_GIC600 0
-#define R9A07G044_CLK_IA55 1
-#define R9A07G044_CLK_SYC 2
-#define R9A07G044_CLK_DMAC 3
-#define R9A07G044_CLK_SYSC 4
-#define R9A07G044_CLK_MTU 5
-#define R9A07G044_CLK_GPT 6
-#define R9A07G044_CLK_ETH0 7
-#define R9A07G044_CLK_ETH1 8
-#define R9A07G044_CLK_I2C0 9
-#define R9A07G044_CLK_I2C1 10
-#define R9A07G044_CLK_I2C2 11
-#define R9A07G044_CLK_I2C3 12
-#define R9A07G044_CLK_SCIF0 13
-#define R9A07G044_CLK_SCIF1 14
-#define R9A07G044_CLK_SCIF2 15
-#define R9A07G044_CLK_SCIF3 16
-#define R9A07G044_CLK_SCIF4 17
-#define R9A07G044_CLK_SCI0 18
-#define R9A07G044_CLK_SCI1 19
-#define R9A07G044_CLK_GPIO 20
-#define R9A07G044_CLK_SDHI0 21
-#define R9A07G044_CLK_SDHI1 22
-#define R9A07G044_CLK_USB0 23
-#define R9A07G044_CLK_USB1 24
-#define R9A07G044_CLK_CANFD 25
-#define R9A07G044_CLK_SSI0 26
-#define R9A07G044_CLK_SSI1 27
-#define R9A07G044_CLK_SSI2 28
-#define R9A07G044_CLK_SSI3 29
-#define R9A07G044_CLK_MHU 30
-#define R9A07G044_CLK_OSTM0 31
-#define R9A07G044_CLK_OSTM1 32
-#define R9A07G044_CLK_OSTM2 33
-#define R9A07G044_CLK_WDT0 34
-#define R9A07G044_CLK_WDT1 35
-#define R9A07G044_CLK_WDT2 36
-#define R9A07G044_CLK_WDT_PON 37
-#define R9A07G044_CLK_GPU 38
-#define R9A07G044_CLK_ISU 39
-#define R9A07G044_CLK_H264 40
-#define R9A07G044_CLK_CRU 41
-#define R9A07G044_CLK_MIPI_DSI 42
-#define R9A07G044_CLK_LCDC 43
-#define R9A07G044_CLK_SRC 44
-#define R9A07G044_CLK_RSPI0 45
-#define R9A07G044_CLK_RSPI1 46
-#define R9A07G044_CLK_RSPI2 47
-#define R9A07G044_CLK_ADC 48
-#define R9A07G044_CLK_TSU_PCLK 49
-#define R9A07G044_CLK_SPI 50
-#define R9A07G044_CLK_MIPI_DSI_V 51
-#define R9A07G044_CLK_MIPI_DSI_PIN 52
+#define R9A07G044_CA55_SCLK 0
+#define R9A07G044_CA55_PCLK 1
+#define R9A07G044_CA55_ATCLK 2
+#define R9A07G044_CA55_GICCLK 3
+#define R9A07G044_CA55_PERICLK 4
+#define R9A07G044_CA55_ACLK 5
+#define R9A07G044_CA55_TSCLK 6
+#define R9A07G044_GIC600_GICCLK 7
+#define R9A07G044_IA55_CLK 8
+#define R9A07G044_IA55_PCLK 9
+#define R9A07G044_MHU_PCLK 10
+#define R9A07G044_SYC_CNT_CLK 11
+#define R9A07G044_DMAC_ACLK 12
+#define R9A07G044_DMAC_PCLK 13
+#define R9A07G044_OSTM0_PCLK 14
+#define R9A07G044_OSTM1_PCLK 15
+#define R9A07G044_OSTM2_PCLK 16
+#define R9A07G044_MTU_X_MCK_MTU3 17
+#define R9A07G044_POE3_CLKM_POE 18
+#define R9A07G044_GPT_PCLK 19
+#define R9A07G044_POEG_A_CLKP 20
+#define R9A07G044_POEG_B_CLKP 21
+#define R9A07G044_POEG_C_CLKP 22
+#define R9A07G044_POEG_D_CLKP 23
+#define R9A07G044_WDT0_PCLK 24
+#define R9A07G044_WDT0_CLK 25
+#define R9A07G044_WDT1_PCLK 26
+#define R9A07G044_WDT1_CLK 27
+#define R9A07G044_WDT2_PCLK 28
+#define R9A07G044_WDT2_CLK 29
+#define R9A07G044_SPI_CLK2 30
+#define R9A07G044_SPI_CLK 31
+#define R9A07G044_SDHI0_IMCLK 32
+#define R9A07G044_SDHI0_IMCLK2 33
+#define R9A07G044_SDHI0_CLK_HS 34
+#define R9A07G044_SDHI0_ACLK 35
+#define R9A07G044_SDHI1_IMCLK 36
+#define R9A07G044_SDHI1_IMCLK2 37
+#define R9A07G044_SDHI1_CLK_HS 38
+#define R9A07G044_SDHI1_ACLK 39
+#define R9A07G044_GPU_CLK 40
+#define R9A07G044_GPU_AXI_CLK 41
+#define R9A07G044_GPU_ACE_CLK 42
+#define R9A07G044_ISU_ACLK 43
+#define R9A07G044_ISU_PCLK 44
+#define R9A07G044_H264_CLK_A 45
+#define R9A07G044_H264_CLK_P 46
+#define R9A07G044_CRU_SYSCLK 47
+#define R9A07G044_CRU_VCLK 48
+#define R9A07G044_CRU_PCLK 49
+#define R9A07G044_CRU_ACLK 50
+#define R9A07G044_MIPI_DSI_PLLCLK 51
+#define R9A07G044_MIPI_DSI_SYSCLK 52
+#define R9A07G044_MIPI_DSI_ACLK 53
+#define R9A07G044_MIPI_DSI_PCLK 54
+#define R9A07G044_MIPI_DSI_VCLK 55
+#define R9A07G044_MIPI_DSI_LPCLK 56
+#define R9A07G044_LCDC_CLK_A 57
+#define R9A07G044_LCDC_CLK_P 58
+#define R9A07G044_LCDC_CLK_D 59
+#define R9A07G044_SSI0_PCLK2 60
+#define R9A07G044_SSI0_PCLK_SFR 61
+#define R9A07G044_SSI1_PCLK2 62
+#define R9A07G044_SSI1_PCLK_SFR 63
+#define R9A07G044_SSI2_PCLK2 64
+#define R9A07G044_SSI2_PCLK_SFR 65
+#define R9A07G044_SSI3_PCLK2 66
+#define R9A07G044_SSI3_PCLK_SFR 67
+#define R9A07G044_SRC_CLKP 68
+#define R9A07G044_USB_U2H0_HCLK 69
+#define R9A07G044_USB_U2H1_HCLK 70
+#define R9A07G044_USB_U2P_EXR_CPUCLK 71
+#define R9A07G044_USB_PCLK 72
+#define R9A07G044_ETH0_CLK_AXI 73
+#define R9A07G044_ETH0_CLK_CHI 74
+#define R9A07G044_ETH1_CLK_AXI 75
+#define R9A07G044_ETH1_CLK_CHI 76
+#define R9A07G044_I2C0_PCLK 77
+#define R9A07G044_I2C1_PCLK 78
+#define R9A07G044_I2C2_PCLK 79
+#define R9A07G044_I2C3_PCLK 80
+#define R9A07G044_SCIF0_CLK_PCK 81
+#define R9A07G044_SCIF1_CLK_PCK 82
+#define R9A07G044_SCIF2_CLK_PCK 83
+#define R9A07G044_SCIF3_CLK_PCK 84
+#define R9A07G044_SCIF4_CLK_PCK 85
+#define R9A07G044_SCI0_CLKP 86
+#define R9A07G044_SCI1_CLKP 87
+#define R9A07G044_IRDA_CLKP 88
+#define R9A07G044_RSPI0_CLKB 89
+#define R9A07G044_RSPI1_CLKB 90
+#define R9A07G044_RSPI2_CLKB 91
+#define R9A07G044_CANFD_PCLK 92
+#define R9A07G044_GPIO_HCLK 93
+#define R9A07G044_ADC_ADCLK 94
+#define R9A07G044_ADC_PCLK 95
+#define R9A07G044_TSU_PCLK 96
+
+/* R9A07G044 Resets */
+#define R9A07G044_CA55_RST_1_0 0
+#define R9A07G044_CA55_RST_1_1 1
+#define R9A07G044_CA55_RST_3_0 2
+#define R9A07G044_CA55_RST_3_1 3
+#define R9A07G044_CA55_RST_4 4
+#define R9A07G044_CA55_RST_5 5
+#define R9A07G044_CA55_RST_6 6
+#define R9A07G044_CA55_RST_7 7
+#define R9A07G044_CA55_RST_8 8
+#define R9A07G044_CA55_RST_9 9
+#define R9A07G044_CA55_RST_10 10
+#define R9A07G044_CA55_RST_11 11
+#define R9A07G044_CA55_RST_12 12
+#define R9A07G044_GIC600_GICRESET_N 13
+#define R9A07G044_GIC600_DBG_GICRESET_N 14
+#define R9A07G044_IA55_RESETN 15
+#define R9A07G044_MHU_RESETN 16
+#define R9A07G044_DMAC_ARESETN 17
+#define R9A07G044_DMAC_RST_ASYNC 18
+#define R9A07G044_SYC_RESETN 19
+#define R9A07G044_OSTM0_PRESETZ 20
+#define R9A07G044_OSTM1_PRESETZ 21
+#define R9A07G044_OSTM2_PRESETZ 22
+#define R9A07G044_MTU_X_PRESET_MTU3 23
+#define R9A07G044_POE3_RST_M_REG 24
+#define R9A07G044_GPT_RST_C 25
+#define R9A07G044_POEG_A_RST 26
+#define R9A07G044_POEG_B_RST 27
+#define R9A07G044_POEG_C_RST 28
+#define R9A07G044_POEG_D_RST 29
+#define R9A07G044_WDT0_PRESETN 30
+#define R9A07G044_WDT1_PRESETN 31
+#define R9A07G044_WDT2_PRESETN 32
+#define R9A07G044_SPI_RST 33
+#define R9A07G044_SDHI0_IXRST 34
+#define R9A07G044_SDHI1_IXRST 35
+#define R9A07G044_GPU_RESETN 36
+#define R9A07G044_GPU_AXI_RESETN 37
+#define R9A07G044_GPU_ACE_RESETN 38
+#define R9A07G044_ISU_ARESETN 39
+#define R9A07G044_ISU_PRESETN 40
+#define R9A07G044_H264_X_RESET_VCP 41
+#define R9A07G044_H264_CP_PRESET_P 42
+#define R9A07G044_CRU_CMN_RSTB 43
+#define R9A07G044_CRU_PRESETN 44
+#define R9A07G044_CRU_ARESETN 45
+#define R9A07G044_MIPI_DSI_CMN_RSTB 46
+#define R9A07G044_MIPI_DSI_ARESET_N 47
+#define R9A07G044_MIPI_DSI_PRESET_N 48
+#define R9A07G044_LCDC_RESET_N 49
+#define R9A07G044_SSI0_RST_M2_REG 50
+#define R9A07G044_SSI1_RST_M2_REG 51
+#define R9A07G044_SSI2_RST_M2_REG 52
+#define R9A07G044_SSI3_RST_M2_REG 53
+#define R9A07G044_SRC_RST 54
+#define R9A07G044_USB_U2H0_HRESETN 55
+#define R9A07G044_USB_U2H1_HRESETN 56
+#define R9A07G044_USB_U2P_EXL_SYSRST 57
+#define R9A07G044_USB_PRESETN 58
+#define R9A07G044_ETH0_RST_HW_N 59
+#define R9A07G044_ETH1_RST_HW_N 60
+#define R9A07G044_I2C0_MRST 61
+#define R9A07G044_I2C1_MRST 62
+#define R9A07G044_I2C2_MRST 63
+#define R9A07G044_I2C3_MRST 64
+#define R9A07G044_SCIF0_RST_SYSTEM_N 65
+#define R9A07G044_SCIF1_RST_SYSTEM_N 66
+#define R9A07G044_SCIF2_RST_SYSTEM_N 67
+#define R9A07G044_SCIF3_RST_SYSTEM_N 68
+#define R9A07G044_SCIF4_RST_SYSTEM_N 69
+#define R9A07G044_SCI0_RST 70
+#define R9A07G044_SCI1_RST 71
+#define R9A07G044_IRDA_RST 72
+#define R9A07G044_RSPI0_RST 73
+#define R9A07G044_RSPI1_RST 74
+#define R9A07G044_RSPI2_RST 75
+#define R9A07G044_CANFD_RSTP_N 76
+#define R9A07G044_CANFD_RSTC_N 77
+#define R9A07G044_GPIO_RSTN 78
+#define R9A07G044_GPIO_PORT_RESETN 79
+#define R9A07G044_GPIO_SPARE_RESETN 80
+#define R9A07G044_ADC_PRESETN 81
+#define R9A07G044_ADC_ADRST_N 82
+#define R9A07G044_TSU_PRESETN 83
#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */
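The regenerated header now exposes one identifier per module clock plus a new
block of per-module reset lines. A hedged sketch of a consumer record using the
new names (the struct layout is illustrative, not the real CPG driver's tables):

#include <dt-bindings/clock/r9a07g044-cpg.h>

/* illustrative consumer record; field names are assumptions */
struct consumer_clk {
	const char *name;
	unsigned int clk_id;
	unsigned int reset_id;
};

static const struct consumer_clk scif0 = {
	.name     = "scif0",
	.clk_id   = R9A07G044_SCIF0_CLK_PCK,
	.reset_id = R9A07G044_SCIF0_RST_SYSTEM_N,
};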
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3177181c4326..987f15089eeb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -18,7 +18,6 @@
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
-#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
@@ -28,14 +27,11 @@
#include <linux/sbitmap.h>
struct module;
-struct scsi_ioctl_command;
-
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
-struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
@@ -275,9 +271,6 @@ enum blk_queue_state {
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
/*
* Zoned block device models (zoned limit).
*
@@ -506,11 +499,6 @@ struct request_queue {
unsigned int max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */
- /*
- * sg stuff
- */
- unsigned int sg_timeout;
- unsigned int sg_reserved_size;
int node;
struct mutex debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
@@ -537,10 +525,6 @@ struct request_queue {
int mq_freeze_depth;
-#if defined(CONFIG_BLK_DEV_BSG)
- struct bsg_class_device bsg_dev;
-#endif
-
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
@@ -888,16 +872,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_queue_split(struct bio **);
-extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
-extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
- unsigned int, void __user *);
-extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- struct scsi_ioctl_command __user *);
-extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
-extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
-
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
@@ -1346,8 +1320,6 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
gfp_mask, 0);
}
-extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
-
static inline bool bdev_is_partition(struct block_device *bdev)
{
return bdev->bd_partno;
@@ -1376,6 +1348,11 @@ static inline unsigned int queue_max_sectors(const struct request_queue *q)
return q->limits.max_sectors;
}
+static inline unsigned int queue_max_bytes(struct request_queue *q)
+{
+ return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
+}
+
static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
return q->limits.max_hw_sectors;
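Alongside the bsg/scsi removals, blkdev.h gains one helper: queue_max_bytes()
clamps the per-request sector limit so the byte count still fits in a signed
int. A standalone sketch of the same arithmetic with the constants inlined:

#include <stdio.h>

#define SECTOR_SHIFT 9
#define INT_MAX_VAL 0x7fffffff

static unsigned int queue_max_bytes_sketch(unsigned int max_sectors)
{
	unsigned int s = max_sectors;

	/* clamp first so the shift below cannot exceed INT_MAX */
	if (s > ((unsigned int)INT_MAX_VAL >> SECTOR_SHIFT))
		s = (unsigned int)INT_MAX_VAL >> SECTOR_SHIFT;
	return s << SECTOR_SHIFT;
}

int main(void)
{
	/* 0xffffffff sectors clamps to 4194303 sectors == 2147483136 bytes */
	printf("%u\n", queue_max_bytes_sketch(0xffffffffu));
	return 0;
}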
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f309fc1509f2..e8e2b0393ca9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -780,6 +780,7 @@ struct bpf_jit_poke_descriptor {
void *tailcall_target;
void *tailcall_bypass;
void *bypass_addr;
+ void *aux;
union {
struct {
struct bpf_map *map;
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 960988d42f77..6b211323a489 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -12,6 +12,7 @@
#include <linux/blkdev.h>
#include <scsi/scsi_request.h>
+struct bsg_job;
struct request;
struct device;
struct scatterlist;
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index dac37b6e00ec..1ac81c809da9 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -4,36 +4,16 @@
#include <uapi/linux/bsg.h>
-struct request;
+struct bsg_device;
+struct device;
+struct request_queue;
-#ifdef CONFIG_BLK_DEV_BSG
-struct bsg_ops {
- int (*check_proto)(struct sg_io_v4 *hdr);
- int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
- fmode_t mode);
- int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
- void (*free_rq)(struct request *rq);
-};
+typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
+ fmode_t mode, unsigned int timeout);
-struct bsg_class_device {
- struct device *class_dev;
- int minor;
- struct request_queue *queue;
- const struct bsg_ops *ops;
-};
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ bsg_sg_io_fn *sg_io_fn);
+void bsg_unregister_queue(struct bsg_device *bcd);
-int bsg_register_queue(struct request_queue *q, struct device *parent,
- const char *name, const struct bsg_ops *ops);
-int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
-void bsg_unregister_queue(struct request_queue *q);
-#else
-static inline int bsg_scsi_register_queue(struct request_queue *q,
- struct device *parent)
-{
- return 0;
-}
-static inline void bsg_unregister_queue(struct request_queue *q)
-{
-}
-#endif /* CONFIG_BLK_DEV_BSG */
#endif /* _LINUX_BSG_H */
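The bsg_ops table is gone; registration now takes a single SG_IO handler and
returns an opaque struct bsg_device. A hedged in-kernel sketch of a caller
migrating to the new signature (handler body and names are illustrative, and
the ERR_PTR return convention is an assumption based on the new prototype):

/* not a complete driver; compiles only in-kernel */
static int my_sg_io(struct request_queue *q, struct sg_io_v4 *hdr,
		    fmode_t mode, unsigned int timeout)
{
	return -EOPNOTSUPP;		/* illustrative stub */
}

static struct bsg_device *my_bd;

static int my_attach(struct request_queue *q, struct device *parent)
{
	my_bd = bsg_register_queue(q, parent, "my_bsg", my_sg_io);
	return IS_ERR(my_bd) ? PTR_ERR(my_bd) : 0;
}

static void my_detach(void)
{
	bsg_unregister_queue(my_bd);
}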
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index f48d0a31deae..c4fef00abdf3 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -86,11 +86,13 @@ struct cdrom_device_ops {
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
-/* driver specifications */
- const int capability; /* capability flags */
/* handle uniform packets for scsi type devices (scsi,atapi) */
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
+ int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nframes, u8 *last_sense);
+/* driver specifications */
+ const int capability; /* capability flags */
};
int cdrom_multisession(struct cdrom_device_info *cdi,
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 29dbb603bc91..232daaec56e4 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -758,6 +758,16 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
enum ethtool_link_mode_bit_indices link_mode);
/**
+ * ethtool_get_phc_vclocks - Derive PHC vclocks information; the caller
+ * is responsible for freeing the memory behind @vclock_index
+ * @dev: pointer to net_device structure
+ * @vclock_index: pointer to pointer of vclock index
+ *
+ * Return: number of PHC vclocks
+ */
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+
+/**
* ethtool_sprintf - Write formatted string to ethtool string data
* @data: Pointer to start of string to update
* @fmt: Format of string to write
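As the kernel-doc stresses, the index array returned by
ethtool_get_phc_vclocks() is caller-owned. A hedged sketch of the expected call
pattern (a net_device pointer in scope is assumed; kernel-only code):

static void dump_phc_vclocks(struct net_device *dev)
{
	int *vclock_index = NULL;
	int n, i;

	n = ethtool_get_phc_vclocks(dev, &vclock_index);
	for (i = 0; i < n; i++)
		pr_info("phc vclock index %d\n", vclock_index[i]);
	kfree(vclock_index);		/* caller frees, per the kernel-doc */
}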
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 37e1e8f7f08d..e2bc16300c82 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -139,6 +139,8 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
extern int vfs_get_tree(struct fs_context *fc);
extern void put_fs_context(struct fs_context *fc);
+extern int vfs_parse_fs_param_source(struct fs_context *fc,
+ struct fs_parameter *param);
/*
* sget() wrappers to be called from the ->get_tree() op.
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5310e217bd74..dd874a1ee862 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -3,6 +3,7 @@
#define _LINUX_KASAN_H
#include <linux/bug.h>
+#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index acee44b9db26..0f06c2287b52 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -22,14 +22,10 @@
#define MARVELL_PHY_ID_88E1545 0x01410ea0
#define MARVELL_PHY_ID_88E1548P 0x01410ec0
#define MARVELL_PHY_ID_88E3016 0x01410e60
+#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
#define MARVELL_PHY_ID_88X2222 0x01410f10
-/* PHY IDs and mask for Alaska 10G PHYs */
-#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8
-#define MARVELL_PHY_ID_88X3310 0x002b09a0
-#define MARVELL_PHY_ID_88X3340 0x002b09a8
-
/* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 9b7b7cd3bae9..23dadf7aeba8 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -51,7 +51,6 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page, int extra_count);
-extern void copy_huge_page(struct page *dst, struct page *src);
#else
static inline void putback_movable_pages(struct list_head *l) {}
@@ -77,10 +76,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
{
return -ENOSYS;
}
-
-static inline void copy_huge_page(struct page *dst, struct page *src)
-{
-}
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_COMPACTION
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 57453dba41b9..7ca22e6e694a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -906,6 +906,7 @@ void __put_page(struct page *page);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
+void copy_huge_page(struct page *dst, struct page *src);
/*
* Compound pages have a destructor function. Provide a
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index aba237c0b3a2..71fac9237725 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -11,7 +11,10 @@
#include <linux/device.h>
#include <linux/pps_kernel.h>
#include <linux/ptp_clock.h>
+#include <linux/timecounter.h>
+#include <linux/skbuff.h>
+#define PTP_CLOCK_NAME_LEN 32
/**
* struct ptp_clock_request - request PTP clock event
*
@@ -134,7 +137,7 @@ struct ptp_system_timestamp {
struct ptp_clock_info {
struct module *owner;
- char name[16];
+ char name[PTP_CLOCK_NAME_LEN];
s32 max_adj;
int n_alarm;
int n_ext_ts;
@@ -304,6 +307,27 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
*/
void ptp_cancel_worker_sync(struct ptp_clock *ptp);
+/**
+ * ptp_get_vclocks_index() - get the indices of all vclocks on a pclock;
+ * the caller is responsible for freeing the memory
+ * behind @vclock_index
+ *
+ * @pclock_index: PHC index of the PTP pclock.
+ * @vclock_index: pointer to pointer of vclock index.
+ *
+ * Return: number of vclocks.
+ */
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
+
+/**
+ * ptp_convert_timestamp() - convert timestamp to a ptp vclock time
+ *
+ * @hwtstamps: skb_shared_hwtstamps structure pointer
+ * @vclock_index: phc index of ptp vclock.
+ */
+void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
+ int vclock_index);
+
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -323,6 +347,11 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp,
{ return -EOPNOTSUPP; }
static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{ }
+static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{ return 0; }
+static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
+ int vclock_index)
+{ }
#endif
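A hedged sketch pairing the two new calls on the driver side (error handling
elided; skb_hwtstamps() comes from skbuff.h, which the hunk now includes):

/* translate a hardware RX timestamp into a bound vclock's time domain */
static void rx_timestamp_to_vclock(struct sk_buff *skb, int vclock_index)
{
	ptp_convert_timestamp(skb_hwtstamps(skb), vclock_index);
}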
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 83fb86133fe1..c976cc6de257 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -291,7 +291,9 @@ static inline int page_referenced(struct page *page, int is_locked,
return 0;
}
-#define try_to_unmap(page, refs) false
+static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
+{
+}
static inline int page_mkclean(struct page *page)
{
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 79d0a1237e6c..80e781c51ddc 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -101,6 +101,10 @@ struct scmi_clk_proto_ops {
* to sustained performance level mapping
* @est_power_get: gets the estimated power cost for a given performance domain
* at a given frequency
+ * @fast_switch_possible: indicates whether fast DVFS switching is possible
+ * for a given device
+ * @power_scale_mw_get: indicates whether the power values provided are in
+ * milliwatts or in some other (abstract) scale
*/
struct scmi_perf_proto_ops {
int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -153,7 +157,7 @@ struct scmi_power_proto_ops {
};
/**
- * scmi_sensor_reading - represent a timestamped read
+ * struct scmi_sensor_reading - represents a timestamped read
*
* Used by @reading_get_timestamped method.
*
@@ -167,7 +171,7 @@ struct scmi_sensor_reading {
};
/**
- * scmi_range_attrs - specifies a sensor or axis values' range
+ * struct scmi_range_attrs - specifies a sensor or axis values' range
* @min_range: The minimum value which can be represented by the sensor/axis.
* @max_range: The maximum value which can be represented by the sensor/axis.
*/
@@ -177,7 +181,7 @@ struct scmi_range_attrs {
};
/**
- * scmi_sensor_axis_info - describes one sensor axes
+ * struct scmi_sensor_axis_info - describes one sensor axis
* @id: The axes ID.
* @type: Axes type. Chosen amongst one of @enum scmi_sensor_class.
* @scale: Power-of-10 multiplier applied to the axis unit.
@@ -205,8 +209,8 @@ struct scmi_sensor_axis_info {
};
/**
- * scmi_sensor_intervals_info - describes number and type of available update
- * intervals
+ * struct scmi_sensor_intervals_info - describes number and type of available
+ * update intervals
* @segmented: Flag for segmented intervals' representation. When True there
* will be exactly 3 intervals in @desc, with each entry
* representing a member of a segment in this order:
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index afbf8037d8db..d2176a56828a 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -51,6 +51,14 @@ struct scpi_sensor_info {
* OPP is an index to the list return by @dvfs_get_info
* @dvfs_get_info: returns the DVFS capabilities of the given power
* domain. It includes the OPP list and the latency information
+ * @device_domain_id: gets the scpi domain id for a given device
+ * @get_transition_latency: gets the DVFS transition latency for a given device
+ * @add_opps_to_device: adds all the OPPs for a given device
+ * @sensor_get_capability: gets the list of capabilities for the sensors
+ * @sensor_get_info: gets the information of the specified sensor
+ * @sensor_get_value: gets the current value of the sensor
+ * @device_get_power_state: gets the power state of a power domain
+ * @device_set_power_state: sets the power state of a power domain
*/
struct scpi_ops {
u32 (*get_version)(void);
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index d5ae621d66ba..a6f03b36fc4f 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -115,7 +115,9 @@ struct stmmac_axi {
#define EST_GCL 1024
struct stmmac_est {
+ struct mutex lock;
int enable;
+ u32 btr_reserve[2];
u32 btr_offset[2];
u32 btr[2];
u32 ctr[2];
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index 143568d64b20..4b57bbba588a 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -338,7 +338,7 @@ do { \
FP_SET_EXCEPTION(FP_EX_INVALID | FP_EX_INVALID_ISI); \
break; \
} \
- /* FALLTHRU */ \
+ fallthrough; \
\
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 15335732e166..625d9c72dee3 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -201,6 +201,11 @@ struct bond_up_slave {
*/
#define BOND_LINK_NOCHANGE -1
+struct bond_ipsec {
+ struct list_head list;
+ struct xfrm_state *xs;
+};
+
/*
* Here are the locking policies for the two bonding locks:
* Get rcu_read_lock when reading or RTNL when writing slave list.
@@ -249,7 +254,9 @@ struct bonding {
#endif /* CONFIG_DEBUG_FS */
struct rtnl_link_stats64 bond_stats;
#ifdef CONFIG_XFRM_OFFLOAD
- struct xfrm_state *xs;
+ struct list_head ipsec_list;
+ /* protecting ipsec_list */
+ spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
};
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 73af4a64a599..40296ed976a9 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -38,7 +38,7 @@ static inline bool net_busy_loop_on(void)
static inline bool sk_can_busy_loop(const struct sock *sk)
{
- return sk->sk_ll_usec && !signal_pending(current);
+ return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}
bool sk_busy_loop_end(void *p, unsigned long start_time);
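sk_ll_usec can be updated concurrently from the setsockopt path, so the
lockless fast path now reads it with READ_ONCE(). A hedged miniature of the
paired annotations (generic kernel-style code; the writer side shown is an
assumption about the sockopt path, not part of this hunk):

static unsigned int busy_poll_usec;	/* shared, no lock on the read side */

static void set_usec(unsigned int val)	/* writer, e.g. setsockopt */
{
	WRITE_ONCE(busy_poll_usec, val);
}

static bool can_busy_loop(void)		/* lockless reader */
{
	return READ_ONCE(busy_poll_usec) != 0;
}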
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
deleted file mode 100644
index 552cf68d28d2..000000000000
--- a/include/net/caif/caif_hsi.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson / daniel.martensson@stericsson.com
- * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com
- */
-
-#ifndef CAIF_HSI_H_
-#define CAIF_HSI_H_
-
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_device.h>
-#include <linux/atomic.h>
-
-/*
- * Maximum number of CAIF frames that can reside in the same HSI frame.
- */
-#define CFHSI_MAX_PKTS 15
-
-/*
- * Maximum number of bytes used for the frame that can be embedded in the
- * HSI descriptor.
- */
-#define CFHSI_MAX_EMB_FRM_SZ 96
-
-/*
- * Decides if HSI buffers should be prefilled with 0xFF pattern for easier
- * debugging. Both TX and RX buffers will be filled before the transfer.
- */
-#define CFHSI_DBG_PREFILL 0
-
-/* Structure describing a HSI packet descriptor. */
-#pragma pack(1) /* Byte alignment. */
-struct cfhsi_desc {
- u8 header;
- u8 offset;
- u16 cffrm_len[CFHSI_MAX_PKTS];
- u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ];
-};
-#pragma pack() /* Default alignment. */
-
-/* Size of the complete HSI packet descriptor. */
-#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc))
-
-/*
- * Size of the complete HSI packet descriptor excluding the optional embedded
- * CAIF frame.
- */
-#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ)
-
-/*
- * Maximum bytes transferred in one transfer.
- */
-#define CFHSI_MAX_CAIF_FRAME_SZ 4096
-
-#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ)
-
-/* Size of the complete HSI TX buffer. */
-#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Size of the complete HSI RX buffer. */
-#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Bitmasks for the HSI descriptor. */
-#define CFHSI_PIGGY_DESC (0x01 << 7)
-
-#define CFHSI_TX_STATE_IDLE 0
-#define CFHSI_TX_STATE_XFER 1
-
-#define CFHSI_RX_STATE_DESC 0
-#define CFHSI_RX_STATE_PAYLOAD 1
-
-/* Bitmasks for power management. */
-#define CFHSI_WAKE_UP 0
-#define CFHSI_WAKE_UP_ACK 1
-#define CFHSI_WAKE_DOWN_ACK 2
-#define CFHSI_AWAKE 3
-#define CFHSI_WAKELOCK_HELD 4
-#define CFHSI_SHUTDOWN 5
-#define CFHSI_FLUSH_FIFO 6
-
-#ifndef CFHSI_INACTIVITY_TOUT
-#define CFHSI_INACTIVITY_TOUT (1 * HZ)
-#endif /* CFHSI_INACTIVITY_TOUT */
-
-#ifndef CFHSI_WAKE_TOUT
-#define CFHSI_WAKE_TOUT (3 * HZ)
-#endif /* CFHSI_WAKE_TOUT */
-
-#ifndef CFHSI_MAX_RX_RETRIES
-#define CFHSI_MAX_RX_RETRIES (10 * HZ)
-#endif
-
-/* Structure implemented by the CAIF HSI driver. */
-struct cfhsi_cb_ops {
- void (*tx_done_cb) (struct cfhsi_cb_ops *drv);
- void (*rx_done_cb) (struct cfhsi_cb_ops *drv);
- void (*wake_up_cb) (struct cfhsi_cb_ops *drv);
- void (*wake_down_cb) (struct cfhsi_cb_ops *drv);
-};
-
-/* Structure implemented by HSI device. */
-struct cfhsi_ops {
- int (*cfhsi_up) (struct cfhsi_ops *dev);
- int (*cfhsi_down) (struct cfhsi_ops *dev);
- int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev);
- int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev);
- int (*cfhsi_wake_up) (struct cfhsi_ops *dev);
- int (*cfhsi_wake_down) (struct cfhsi_ops *dev);
- int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status);
- int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy);
- int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev);
- struct cfhsi_cb_ops *cb_ops;
-};
-
-/* Structure holds status of received CAIF frames processing */
-struct cfhsi_rx_state {
- int state;
- int nfrms;
- int pld_len;
- int retries;
- bool piggy_desc;
-};
-
-/* Priority mapping */
-enum {
- CFHSI_PRIO_CTL = 0,
- CFHSI_PRIO_VI,
- CFHSI_PRIO_VO,
- CFHSI_PRIO_BEBK,
- CFHSI_PRIO_LAST,
-};
-
-struct cfhsi_config {
- u32 inactivity_timeout;
- u32 aggregation_timeout;
- u32 head_align;
- u32 tail_align;
- u32 q_high_mark;
- u32 q_low_mark;
-};
-
-/* Structure implemented by CAIF HSI drivers. */
-struct cfhsi {
- struct caif_dev_common cfdev;
- struct net_device *ndev;
- struct platform_device *pdev;
- struct sk_buff_head qhead[CFHSI_PRIO_LAST];
- struct cfhsi_cb_ops cb_ops;
- struct cfhsi_ops *ops;
- int tx_state;
- struct cfhsi_rx_state rx_state;
- struct cfhsi_config cfg;
- int rx_len;
- u8 *rx_ptr;
- u8 *tx_buf;
- u8 *rx_buf;
- u8 *rx_flip_buf;
- spinlock_t lock;
- int flow_off_sent;
- struct list_head list;
- struct work_struct wake_up_work;
- struct work_struct wake_down_work;
- struct work_struct out_of_sync_work;
- struct workqueue_struct *wq;
- wait_queue_head_t wake_up_wait;
- wait_queue_head_t wake_down_wait;
- wait_queue_head_t flush_fifo_wait;
- struct timer_list inactivity_timer;
- struct timer_list rx_slowpath_timer;
-
- /* TX aggregation */
- int aggregation_len;
- struct timer_list aggregation_timer;
-
- unsigned long bits;
-};
-extern struct platform_driver cfhsi_driver;
-
-/**
- * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters.
- * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before
- * taking the HSI wakeline down, in milliseconds.
- * When using RT Netlink to create, destroy or configure a CAIF HSI interface,
- * enum ifla_caif_hsi is used to specify the configuration attributes.
- */
-enum ifla_caif_hsi {
- __IFLA_CAIF_HSI_UNSPEC,
- __IFLA_CAIF_HSI_INACTIVITY_TOUT,
- __IFLA_CAIF_HSI_AGGREGATION_TOUT,
- __IFLA_CAIF_HSI_HEAD_ALIGN,
- __IFLA_CAIF_HSI_TAIL_ALIGN,
- __IFLA_CAIF_HSI_QHIGH_WATERMARK,
- __IFLA_CAIF_HSI_QLOW_WATERMARK,
- __IFLA_CAIF_HSI_MAX
-};
-
-struct cfhsi_ops *cfhsi_get_ops(void);
-
-#endif /* CAIF_HSI_H_ */
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 56cb3c38569a..14efa0ded75d 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -45,7 +45,9 @@ skb_tunnel_info(const struct sk_buff *skb)
return &md_dst->u.tun_info;
dst = skb_dst(skb);
- if (dst && dst->lwtstate)
+ if (dst && dst->lwtstate &&
+ (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
+ dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
return lwt_tun_info(dst->lwtstate);
return NULL;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f14149df5a65..625a38ccb5d9 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -263,7 +263,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
-static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
{
int mtu;
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index cb580b06152f..8b5af683a818 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -105,7 +105,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
unsigned int *size, unsigned int remaining,
struct mptcp_out_options *opts);
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
struct mptcp_out_options *opts);
@@ -227,9 +227,10 @@ static inline bool mptcp_established_options(struct sock *sk,
return false;
}
-static inline void mptcp_incoming_options(struct sock *sk,
+static inline bool mptcp_incoming_options(struct sock *sk,
struct sk_buff *skb)
{
+ return true;
}
static inline void mptcp_skb_ext_move(struct sk_buff *to,
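mptcp_incoming_options() now tells the caller whether the skb may be kept. A
hedged sketch of the call-site shape this implies in TCP's receive path (the
label and surrounding control flow are assumptions, not part of this hunk):

	if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb))
		goto drop;	/* option processing asked us to toss the skb */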
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 09f2efea0b97..13807ea94cd2 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -30,7 +30,6 @@ void nf_conntrack_cleanup_net(struct net *net);
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
void nf_conntrack_proto_pernet_init(struct net *net);
-void nf_conntrack_proto_pernet_fini(struct net *net);
int nf_conntrack_proto_init(void);
void nf_conntrack_proto_fini(void);
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index c3094b83a525..37e5300c7e5a 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -27,6 +27,7 @@ struct nf_tcp_net {
u8 tcp_loose;
u8 tcp_be_liberal;
u8 tcp_max_retrans;
+ u8 tcp_ignore_invalid_rst;
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
unsigned int offload_timeout;
unsigned int offload_pickup;
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 265fffa33dad..5859e0a16a58 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -360,8 +360,7 @@ enum {
#define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
/* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
* Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
* addresses.
*/
@@ -369,7 +368,6 @@ enum {
((htonl(INADDR_BROADCAST) == a) || \
ipv4_is_multicast(a) || \
ipv4_is_zeronet(a) || \
- ipv4_is_test_198(a) || \
ipv4_is_anycast_6to4(a))
/* Flags used for the bind address copy functions. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 8bdd80027ffb..f23cb259b0e2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -316,7 +316,9 @@ struct bpf_local_storage;
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
* @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
- * @sk_tsflags: SO_TIMESTAMPING socket options
+ * @sk_tsflags: SO_TIMESTAMPING flags
+ * @sk_bind_phc: index of the PTP virtual clock bound via SO_TIMESTAMPING
+ * for timestamping
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
@@ -493,6 +495,7 @@ struct sock {
seqlock_t sk_stamp_seq;
#endif
u16 sk_tsflags;
+ int sk_bind_phc;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
@@ -2755,7 +2758,8 @@ void sock_def_readable(struct sock *sk);
int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
-int sock_set_timestamping(struct sock *sk, int optname, int val);
+int sock_set_timestamping(struct sock *sk, int optname,
+ struct so_timestamping timestamping);
void sock_enable_timestamps(struct sock *sk);
void sock_no_linger(struct sock *sk);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e668f1bf780d..17df9b047ee4 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -686,6 +686,10 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
+ /* mptcp hooks are only on the slow path */
+ if (sk_is_mptcp((struct sock *)tp))
+ return;
+
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
ntohl(TCP_FLAG_ACK) |
snd_wnd);
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 779a59fe8676..6c5a1c1c6b1e 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -111,9 +111,6 @@ struct scsi_cmnd {
reconnects. Probably == sector
size */
- struct request *request; /* The command we are
- working on */
-
unsigned char *sense_buffer;
/* obtained by REQUEST SENSE when
* CHECK CONDITION is received on original
@@ -146,6 +143,12 @@ struct scsi_cmnd {
unsigned int extra_len; /* length of alignment and padding */
};
+/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
+static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
+{
+ return blk_mq_rq_from_pdu(scmd);
+}
+
/*
* Return the driver private allocation behind the command.
* Only works if cmd_size is set in the host template.
@@ -158,7 +161,9 @@ static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
/* make sure not to use it with passthrough commands */
static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
{
- return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+
+ return *(struct scsi_driver **)rq->rq_disk->private_data;
}
extern void scsi_finish_command(struct scsi_cmnd *cmd);
@@ -220,6 +225,25 @@ static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd,
buf, buflen);
}
+static inline sector_t scsi_get_sector(struct scsi_cmnd *scmd)
+{
+ return blk_rq_pos(scsi_cmd_to_rq(scmd));
+}
+
+static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+{
+ unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
+
+ return blk_rq_pos(scsi_cmd_to_rq(scmd)) >> shift;
+}
+
+static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
+{
+ unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
+
+ return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift;
+}
+
/*
* The operations below are hints that tell the controller driver how
* to handle I/Os with DIF or similar types of protection information.
@@ -282,9 +306,11 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
return scmd->prot_type;
}
-static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
{
- return blk_rq_pos(scmd->request);
+ struct request *rq = blk_mq_rq_from_pdu(scmd);
+
+ return t10_pi_ref_tag(rq);
}
static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
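The new helpers normalize between 512-byte block-layer sectors and the device's
logical blocks via ilog2(sector_size) - SECTOR_SHIFT. A standalone worked
example of the shift (the values are illustrative):

#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	unsigned int sector_size = 4096;	/* hypothetical 4k device */
	unsigned int shift = __builtin_ctz(sector_size) - SECTOR_SHIFT; /* 3 */
	unsigned long long pos = 80;		/* blk_rq_pos() analogue */

	/* scsi_get_lba(): 80 512-byte sectors -> LBA 10 on a 4k device */
	printf("lba=%llu\n", pos >> shift);
	return 0;
}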
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index ac6ab16abee7..09a17f6e93a7 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -10,6 +10,7 @@
#include <linux/atomic.h>
#include <linux/sbitmap.h>
+struct bsg_device;
struct device;
struct request_queue;
struct scsi_cmnd;
@@ -205,6 +206,7 @@ struct scsi_device {
unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device
* creation time */
+ unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
bool offline_already; /* Device offline message logged */
@@ -234,6 +236,10 @@ struct scsi_device {
size_t dma_drain_len;
void *dma_drain_buf;
+ unsigned int sg_timeout;
+ unsigned int sg_reserved_size;
+
+ struct bsg_device *bsg_dev;
unsigned char access_state;
struct mutex state_mutex;
enum scsi_device_state sdev_state;
@@ -265,13 +271,15 @@ sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
__printf(3, 4) void
scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
-#define scmd_dbg(scmd, fmt, a...) \
- do { \
- if ((scmd)->request->rq_disk) \
- sdev_dbg((scmd)->device, "[%s] " fmt, \
- (scmd)->request->rq_disk->disk_name, ##a);\
- else \
- sdev_dbg((scmd)->device, fmt, ##a); \
+#define scmd_dbg(scmd, fmt, a...) \
+ do { \
+ struct request *__rq = scsi_cmd_to_rq((scmd)); \
+ \
+ if (__rq->rq_disk) \
+ sdev_dbg((scmd)->device, "[%s] " fmt, \
+ __rq->rq_disk->disk_name, ##a); \
+ else \
+ sdev_dbg((scmd)->device, fmt, ##a); \
} while (0)
enum scsi_target_state {
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 3fdb322d4c4b..5d14adae21c7 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -28,7 +28,8 @@
#define BLIST_LARGELUN ((__force blist_flags_t)(1ULL << 9))
/* override additional length field */
#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1ULL << 10))
-#define __BLIST_UNUSED_11 ((__force blist_flags_t)(1ULL << 11))
+/* ignore MEDIA CHANGE unit attention after resuming from runtime suspend */
+#define BLIST_IGN_MEDIA_CHANGE ((__force blist_flags_t)(1ULL << 11))
/* do not do automatic start on add */
#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1ULL << 12))
#define __BLIST_UNUSED_13 ((__force blist_flags_t)(1ULL << 13))
@@ -73,8 +74,7 @@
#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
(__force blist_flags_t) \
((__force __u64)__BLIST_LAST_USED - 1ULL)))
-#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_11 | \
- __BLIST_UNUSED_13 | \
+#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \
__BLIST_UNUSED_14 | \
__BLIST_UNUSED_15 | \
__BLIST_UNUSED_16 | \
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index b465799f4d2d..d2cb9aeaf1f1 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -18,7 +18,9 @@
#ifdef __KERNEL__
+struct gendisk;
struct scsi_device;
+struct sg_io_hdr;
/*
* Structures used for scsi_ioctl et al.
@@ -43,8 +45,11 @@ typedef struct scsi_fctargaddress {
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
int cmd, bool ndelay);
-extern int scsi_ioctl(struct scsi_device *, int, void __user *);
-extern int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
+ int cmd, void __user *arg);
+int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
+int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
+bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode);
#endif /* __KERNEL__ */
#endif /* _SCSI_IOCTL_H */
diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h
index b06f28c74908..9129b23e12bc 100644
--- a/include/scsi/scsi_request.h
+++ b/include/scsi/scsi_request.h
@@ -28,6 +28,4 @@ static inline void scsi_req_free_cmd(struct scsi_request *req)
kfree(req->cmd);
}
-void scsi_req_init(struct scsi_request *req);
-
#endif /* _SCSI_SCSI_REQUEST_H */
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index e19c2504a14b..1066b1194a5a 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -237,14 +237,19 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc);
#ifdef CONFIG_TEGRA_MC
struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev);
+int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev);
#else
static inline struct tegra_mc *
devm_tegra_memory_controller_get(struct device *dev)
{
return ERR_PTR(-ENODEV);
}
-#endif
-int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev);
+static inline int
+tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
#endif /* __SOC_TEGRA_MC_H__ */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 85c16c266eac..f53e0f160695 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -171,7 +171,7 @@ enum tcm_sense_reason_table {
TCM_WRITE_PROTECTED = R(0x0c),
TCM_CHECK_CONDITION_ABORT_CMD = R(0x0d),
TCM_CHECK_CONDITION_UNIT_ATTENTION = R(0x0e),
- TCM_CHECK_CONDITION_NOT_READY = R(0x0f),
+
TCM_RESERVATION_CONFLICT = R(0x10),
TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
TCM_OUT_OF_RESOURCES = R(0x12),
@@ -188,6 +188,10 @@ enum tcm_sense_reason_table {
TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
TCM_LUN_BUSY = R(0x1e),
TCM_INVALID_FIELD_IN_COMMAND_IU = R(0x1f),
+ TCM_ALUA_TG_PT_STANDBY = R(0x20),
+ TCM_ALUA_TG_PT_UNAVAILABLE = R(0x21),
+ TCM_ALUA_STATE_TRANSITION = R(0x22),
+ TCM_ALUA_OFFLINE = R(0x23),
#undef R
};
@@ -455,8 +459,6 @@ enum target_core_dif_check {
struct se_cmd {
/* SAM response code being sent to initiator */
u8 scsi_status;
- u8 scsi_asc;
- u8 scsi_ascq;
u16 scsi_sense_length;
unsigned unknown_data_length:1;
bool state_active:1;
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index c7135c9c37a5..b3b93710eff7 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -46,6 +46,7 @@ enum {
ETHTOOL_MSG_FEC_SET,
ETHTOOL_MSG_MODULE_EEPROM_GET,
ETHTOOL_MSG_STATS_GET,
+ ETHTOOL_MSG_PHC_VCLOCKS_GET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -88,6 +89,7 @@ enum {
ETHTOOL_MSG_FEC_NTF,
ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY,
ETHTOOL_MSG_STATS_GET_REPLY,
+ ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -440,6 +442,19 @@ enum {
ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1)
};
+/* PHC VCLOCKS */
+
+enum {
+ ETHTOOL_A_PHC_VCLOCKS_UNSPEC,
+ ETHTOOL_A_PHC_VCLOCKS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PHC_VCLOCKS_NUM, /* u32 */
+ ETHTOOL_A_PHC_VCLOCKS_INDEX, /* array, s32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PHC_VCLOCKS_CNT,
+ ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1)
+};
+
/* CABLE TEST */
enum {
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 7ed0b3d1c00a..fcc61c73a666 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#include <linux/socket.h> /* for SO_TIMESTAMPING */
-/* SO_TIMESTAMPING gets an integer bit field comprised of these values */
+/* SO_TIMESTAMPING flags */
enum {
SOF_TIMESTAMPING_TX_HARDWARE = (1<<0),
SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1),
@@ -30,8 +30,9 @@ enum {
SOF_TIMESTAMPING_OPT_STATS = (1<<12),
SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13),
SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
+ SOF_TIMESTAMPING_BIND_PHC = (1 << 15),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
@@ -47,6 +48,18 @@ enum {
SOF_TIMESTAMPING_TX_ACK)
/**
+ * struct so_timestamping - SO_TIMESTAMPING parameter
+ *
+ * @flags: SO_TIMESTAMPING flags
+ * @bind_phc: Index of the PTP virtual clock bound to the socket. Only
+ * valid when the SOF_TIMESTAMPING_BIND_PHC flag is set.
+ */
+struct so_timestamping {
+ int flags;
+ int bind_phc;
+};
+
+/**
* struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter
*
* @flags: no flags defined right now, must be zero for %SIOCSHWTSTAMP
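SO_TIMESTAMPING now accepts the two-field struct as well as the legacy int. A
hedged user-space sketch binding a socket's hardware timestamps to a PHC vclock
(the index value 1 is illustrative):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int bind_phc(int fd)
{
	struct so_timestamping ts = {
		.flags = SOF_TIMESTAMPING_TX_HARDWARE |
			 SOF_TIMESTAMPING_RAW_HARDWARE |
			 SOF_TIMESTAMPING_BIND_PHC,
		.bind_phc = 1,		/* illustrative vclock index */
	};

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
}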
diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h
index 45c8d3b027e0..0af9c113d665 100644
--- a/include/uapi/linux/netfilter/nfnetlink_log.h
+++ b/include/uapi/linux/netfilter/nfnetlink_log.h
@@ -61,7 +61,7 @@ enum nfulnl_attr_type {
NFULA_HWTYPE, /* hardware type */
NFULA_HWHEADER, /* hardware header */
NFULA_HWLEN, /* hardware header length */
- NFULA_CT, /* nf_conntrack_netlink.h */
+ NFULA_CT, /* nfnetlink_conntrack.h */
NFULA_CT_INFO, /* enum ip_conntrack_info */
NFULA_VLAN, /* nested attribute: packet vlan info */
NFULA_L2HDR, /* full L2 header */
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index bcb2cb5d40b9..aed90c4df0c8 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -51,11 +51,11 @@ enum nfqnl_attr_type {
NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */
NFQA_HWADDR, /* nfqnl_msg_packet_hw */
NFQA_PAYLOAD, /* opaque data payload */
- NFQA_CT, /* nf_conntrack_netlink.h */
+ NFQA_CT, /* nfnetlink_conntrack.h */
NFQA_CT_INFO, /* enum ip_conntrack_info */
NFQA_CAP_LEN, /* __u32 length of captured packet */
NFQA_SKB_INFO, /* __u32 skb meta information */
- NFQA_EXP, /* nf_conntrack_netlink.h */
+ NFQA_EXP, /* nfnetlink_conntrack.h */
NFQA_UID, /* __u32 sk uid */
NFQA_GID, /* __u32 sk gid */
NFQA_SECCTX, /* security context string */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 95b1597f16ae..27ace512babd 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -46,6 +46,7 @@
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
#define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */
+#define TCMU_MAILBOX_FLAG_CAP_KEEP_BUF (1<<3) /* Keep buf after cmd completion */
struct tcmu_mailbox {
__u16 version;
@@ -75,6 +76,7 @@ struct tcmu_cmd_entry_hdr {
__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP 0x1
#define TCMU_UFLAG_READ_LEN 0x2
+#define TCMU_UFLAG_KEEP_BUF 0x4
__u8 uflags;
} __packed;
diff --git a/init/Kconfig b/init/Kconfig
index bb0d6e6262b1..55f9f7738ebb 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1847,7 +1847,6 @@ config SLUB_DEBUG
default y
bool "Enable SLUB debugging support" if EXPERT
depends on SLUB && SYSFS
- select STACKDEPOT if STACKTRACE_SUPPORT
help
SLUB has extensive debug support features. Disabling these can
result in significant savings in code size. This also disables
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 034ad93a1ad7..9b1577498373 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2236,8 +2236,14 @@ static void bpf_prog_free_deferred(struct work_struct *work)
#endif
if (aux->dst_trampoline)
bpf_trampoline_put(aux->dst_trampoline);
- for (i = 0; i < aux->func_cnt; i++)
+ for (i = 0; i < aux->func_cnt; i++) {
+ /* We can just unlink the subprog poke descriptor table as
+ * it was originally linked to the main program and is also
+ * released along with it.
+ */
+ aux->func[i]->aux->poke_tab = NULL;
bpf_jit_free(aux->func[i]);
+ }
if (aux->func_cnt) {
kfree(aux->func);
bpf_prog_unlock_free(aux->prog);
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 2546dafd6672..fdc20892837c 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -558,7 +558,8 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
for (i = 0; i < map->max_entries; i++) {
- dst = READ_ONCE(dtab->netdev_map[i]);
+ dst = rcu_dereference_check(dtab->netdev_map[i],
+ rcu_read_lock_bh_held());
if (!is_valid_dst(dst, xdp, exclude_ifindex))
continue;
@@ -654,7 +655,8 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
for (i = 0; i < map->max_entries; i++) {
- dst = READ_ONCE(dtab->netdev_map[i]);
+ dst = rcu_dereference_check(dtab->netdev_map[i],
+ rcu_read_lock_bh_held());
if (!dst || dst->dev->ifindex == exclude_ifindex)
continue;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index be38bb930bf1..42a4063de7cd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12121,33 +12121,19 @@ static int jit_subprogs(struct bpf_verifier_env *env)
goto out_free;
func[i]->is_func = 1;
func[i]->aux->func_idx = i;
- /* the btf and func_info will be freed only at prog->aux */
+ /* Below members will be freed only at prog->aux */
func[i]->aux->btf = prog->aux->btf;
func[i]->aux->func_info = prog->aux->func_info;
+ func[i]->aux->poke_tab = prog->aux->poke_tab;
+ func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
for (j = 0; j < prog->aux->size_poke_tab; j++) {
- u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
- int ret;
+ struct bpf_jit_poke_descriptor *poke;
- if (!(insn_idx >= subprog_start &&
- insn_idx <= subprog_end))
- continue;
-
- ret = bpf_jit_add_poke_descriptor(func[i],
- &prog->aux->poke_tab[j]);
- if (ret < 0) {
- verbose(env, "adding tail call poke descriptor failed\n");
- goto out_free;
- }
-
- func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
-
- map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
- ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
- if (ret < 0) {
- verbose(env, "tracking tail call prog failed\n");
- goto out_free;
- }
+ poke = &prog->aux->poke_tab[j];
+ if (poke->insn_idx < subprog_end &&
+ poke->insn_idx >= subprog_start)
+ poke->aux = func[i]->aux;
}
/* Use bpf_prog_F_tag to indicate functions in stack traces.
@@ -12178,18 +12164,6 @@ static int jit_subprogs(struct bpf_verifier_env *env)
cond_resched();
}
- /* Untrack main program's aux structs so that during map_poke_run()
- * we will not stumble upon the unfilled poke descriptors; each
- * of the main program's poke descs got distributed across subprogs
- * and got tracked onto map, so we are sure that none of them will
- * be missed after the operation below
- */
- for (i = 0; i < prog->aux->size_poke_tab; i++) {
- map_ptr = prog->aux->poke_tab[i].tail_call.map;
-
- map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
- }
-
/* at this point all bpf functions were successfully JITed
* now populate all bpf_calls with correct addresses and
* run last pass of JIT
@@ -12267,14 +12241,22 @@ static int jit_subprogs(struct bpf_verifier_env *env)
bpf_prog_jit_attempt_done(prog);
return 0;
out_free:
+ /* We failed JIT'ing, so at this point we need to unregister poke
+ * descriptors from subprogs, so that kernel is not attempting to
+ * patch it anymore as we're freeing the subprog JIT memory.
+ */
+ for (i = 0; i < prog->aux->size_poke_tab; i++) {
+ map_ptr = prog->aux->poke_tab[i].tail_call.map;
+ map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
+ }
+ /* At this point we're guaranteed that poke descriptors are not
+ * live anymore. We can just unlink its descriptor table as it's
+ * released with the main prog.
+ */
for (i = 0; i < env->subprog_cnt; i++) {
if (!func[i])
continue;
-
- for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
- map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
- map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
- }
+ func[i]->aux->poke_tab = NULL;
bpf_jit_free(func[i]);
}
kfree(func);
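Together with the core.c hunk above, jit_subprogs() now lets subprogs borrow
the main program's poke table and records ownership through the new aux
backlink instead of copying descriptors per subprog. A standalone miniature of
the ownership pattern (types reduced to the essentials; not the real BPF
structures):

#include <stdio.h>

struct aux;				/* stand-in for bpf_prog_aux */

struct poke_desc {
	unsigned int insn_idx;
	struct aux *aux;		/* backlink: which subprog claimed it */
};

struct aux {
	struct poke_desc *poke_tab;	/* owned by main, borrowed by subprogs */
	unsigned int size_poke_tab;
};

int main(void)
{
	struct poke_desc tab[2] = { { .insn_idx = 3 }, { .insn_idx = 9 } };
	struct aux main_prog = { tab, 2 }, subprog = { 0 };

	/* share rather than copy: the subprog sees the main table */
	subprog.poke_tab = main_prog.poke_tab;
	subprog.size_poke_tab = main_prog.size_poke_tab;

	/* claim descriptors whose insn_idx falls inside the subprog range */
	for (unsigned int j = 0; j < main_prog.size_poke_tab; j++)
		if (tab[j].insn_idx < 8)	/* subprog covers insns 0..7 */
			tab[j].aux = &subprog;

	/* before freeing a subprog, unlink the borrowed table so the
	 * descriptors are released exactly once, with the main program */
	subprog.poke_tab = NULL;

	printf("desc0 claimed: %d, desc1 claimed: %d\n",
	       tab[0].aux == &subprog, tab[1].aux == &subprog);
	return 0;
}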
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index ee93b6e89587..8d6bf56ed77a 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -911,13 +911,11 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
if (opt == -ENOPARAM) {
- if (strcmp(param->key, "source") == 0) {
- if (fc->source)
- return invalf(fc, "Multiple sources not supported");
- fc->source = param->string;
- param->string = NULL;
- return 0;
- }
+ int ret;
+
+ ret = vfs_parse_fs_param_source(fc, param);
+ if (ret != -ENOPARAM)
+ return ret;
for_each_subsys(ss, i) {
if (strcmp(param->key, ss->legacy_name))
continue;
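The "source" handling cgroup1 used to open-code now lives in
vfs_parse_fs_param_source(), declared in the fs_context.h hunk earlier in this
patch. A hedged sketch of the fallthrough pattern other filesystems can adopt
(the parameter table name is an assumption):

static int my_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt = fs_parse(fc, my_fs_parameters, param, &result);

	if (opt == -ENOPARAM) {
		int ret = vfs_parse_fs_param_source(fc, param);

		if (ret != -ENOPARAM)	/* handled (0) or a real error */
			return ret;
		/* fall through to filesystem-specific keys */
	}
	return opt;
}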
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
index 8372897402f4..b6f28fad4307 100644
--- a/kernel/debug/gdbstub.c
+++ b/kernel/debug/gdbstub.c
@@ -1045,8 +1045,8 @@ int gdb_serial_stub(struct kgdb_state *ks)
gdb_cmd_detachkill(ks);
return DBG_PASS_EVENT;
}
-#endif
fallthrough;
+#endif
case 'C': /* Exception passing */
tmp = gdb_cmd_exception_pass(ks);
if (tmp > 0)
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 313d4547cbc7..d998a76fb542 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -487,13 +487,13 @@ ref_scale_reader(void *arg)
s64 duration;
VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
- set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+ WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
set_user_nice(current, MAX_NICE);
atomic_inc(&n_init);
if (holdoff)
schedule_timeout_interruptible(holdoff * HZ);
repeat:
- VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());
+ VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());
// Wait for signal that this reader can start.
wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
@@ -503,7 +503,7 @@ repeat:
goto end;
// Make sure that the CPU is affinitized appropriately during testing.
- WARN_ON_ONCE(smp_processor_id() != me);
+ WARN_ON_ONCE(raw_smp_processor_id() != me);
WRITE_ONCE(rt->start_reader, 0);
if (!atomic_dec_return(&n_started))
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 03a118d1c003..8536c55df514 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -953,10 +953,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
in_qs = likely(!t->trc_reader_nesting);
}
- // Mark as checked. Because this is called from the grace-period
- // kthread, also remove the task from the holdout list.
+ // Mark as checked so that the grace-period kthread will
+ // remove it from the holdout list.
t->trc_reader_checked = true;
- trc_del_holdout(t);
if (in_qs)
return true; // Already in quiescent state, done!!!
@@ -983,7 +982,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
// The current task had better be in a quiescent state.
if (t == current) {
t->trc_reader_checked = true;
- trc_del_holdout(t);
WARN_ON_ONCE(t->trc_reader_nesting);
return;
}
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 3f937b20814f..6c76988cc019 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -795,9 +795,9 @@ void show_rcu_gp_kthreads(void)
jr = j - data_race(rcu_state.gp_req_activity);
js = j - data_race(rcu_state.gp_start);
jw = j - data_race(rcu_state.gp_wake_time);
- pr_info("%s: wait state: %s(%d) ->state: %#lx ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
+ pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
rcu_state.name, gp_state_getname(rcu_state.gp_state),
- rcu_state.gp_state, t ? t->__state : 0x1ffffL, t ? t->rt_priority : 0xffU,
+ rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
(long)data_race(rcu_state.gp_seq),
(long)data_race(rcu_get_root()->gp_seq_needed),
diff --git a/kernel/scftorture.c b/kernel/scftorture.c
index 2377cbb32474..29e8fc5d91a7 100644
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -405,15 +405,15 @@ static int scftorture_invoker(void *arg)
VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
cpu = scfp->cpu % nr_cpu_ids;
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
set_user_nice(current, MAX_NICE);
if (holdoff)
schedule_timeout_interruptible(holdoff * HZ);
- VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, smp_processor_id());
+ VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());
// Make sure that the CPU is affinitized appropriately during testing.
- curcpu = smp_processor_id();
+ curcpu = raw_smp_processor_id();
WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
"%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
__func__, scfp->cpu, curcpu, nr_cpu_ids);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 0207aeed31e6..16a9dfc9fffc 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1689,7 +1689,9 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
if (WARN_ON_ONCE(!field))
goto out;
- if (is_string_field(field)) {
+ /* Pointers to strings are just pointers and dangerous to dereference */
+ if (is_string_field(field) &&
+ (field->filter_type != FILTER_PTR_STRING)) {
flags |= HIST_FIELD_FL_STRING;
hist_field->size = MAX_FILTER_STR_VAL;
@@ -4495,8 +4497,6 @@ static inline void add_to_key(char *compound_key, void *key,
field = key_field->field;
if (field->filter_type == FILTER_DYN_STRING)
size = *(u32 *)(rec + field->offset) >> 16;
- else if (field->filter_type == FILTER_PTR_STRING)
- size = strlen(key);
else if (field->filter_type == FILTER_STATIC_STRING)
size = field->size;
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 8c55c4723692..c259842f6d44 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -628,10 +628,8 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
void *entry;
- struct page *page;
entry = xa_load(&dmirror->pt, pfn);
- page = xa_untag_pointer(entry);
if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
return -EPERM;
}
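The deleted lines computed an untagged pointer that was never used; dmirror_check_atomic() only needs the tag. For reference, a minimal sketch of the XArray tagged-pointer API this test relies on (the demo function and tag value are illustrative only):

#include <linux/xarray.h>

static void xa_tag_demo(struct xarray *xa, unsigned long index, void *p)
{
	void *entry;

	/* Pointers may carry a 2-bit tag (0-3) in their low bits. */
	xa_store(xa, index, xa_tag_pointer(p, 1), GFP_KERNEL);

	entry = xa_load(xa, index);
	if (xa_pointer_tag(entry) == 1)		/* inspect the tag alone */
		p = xa_untag_pointer(entry);	/* strip it before dereferencing */
}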
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 924553aa8f78..dfc940d5221d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5440,8 +5440,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
continue;
}
- refs = min3(pages_per_huge_page(h) - pfn_offset,
- (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
+ /* vaddr may not be aligned to PAGE_SIZE */
+ refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
+ (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
if (pages || vmas)
record_subpages_vmas(mem_map_offset(page, pfn_offset),
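A worked example of the off-by-one the ALIGN_DOWN() fixes, with 4 KiB pages and hypothetical addresses: for vma->vm_end = 0x2000 and an unaligned vaddr = 0x0800, the old (vm_end - vaddr) >> PAGE_SHIFT evaluates to 0x1800 >> 12 = 1, even though the range still covers two pages, [0x0000, 0x1000) and [0x1000, 0x2000). Aligning vaddr down first gives (0x2000 - 0x0000) >> 12 = 2, matching the number of subpages that actually get referenced.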
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 98e3059bfea4..d739cdd1621a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -9,6 +9,7 @@
#ifdef CONFIG_KASAN_HW_TAGS
#include <linux/static_key.h>
+#include "../slab.h"
DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
extern bool kasan_flag_async __ro_after_init;
@@ -387,6 +388,17 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
+ /*
+ * Explicitly initialize the memory with the precise object size to
+ * avoid overwriting the SLAB redzone. This disables initialization in
+ * the arch code and may thus lead to a performance penalty. The penalty
+ * is accepted since SLAB redzones aren't enabled in production builds.
+ */
+ if (__slub_debug_enabled() &&
+ init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
+ init = false;
+ memzero_explicit((void *)addr, size);
+ }
size = round_up(size, KASAN_GRANULE_SIZE);
hw_set_mem_tag_range((void *)addr, size, tag, init);
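To see why the unaligned-size test is the right trigger, take the usual KASAN_GRANULE_SIZE of 16 and a 20-byte object: round_up(20, 16) = 32, so hardware-assisted initialization would also zero bytes 20..31, which under SLUB debugging hold the redzone pattern. 20 & 15 = 4 != 0 is exactly what size & KASAN_GRANULE_MASK detects, and the new path instead zeroes precisely 20 bytes in software, then passes init = false down to hw_set_mem_tag_range().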
diff --git a/mm/migrate.c b/mm/migrate.c
index 23cbd9de030b..34a9ad3e0a4f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -537,54 +537,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
}
/*
- * Gigantic pages are so large that we do not guarantee that page++ pointer
- * arithmetic will work across the entire page. We need something more
- * specialized.
- */
-static void __copy_gigantic_page(struct page *dst, struct page *src,
- int nr_pages)
-{
- int i;
- struct page *dst_base = dst;
- struct page *src_base = src;
-
- for (i = 0; i < nr_pages; ) {
- cond_resched();
- copy_highpage(dst, src);
-
- i++;
- dst = mem_map_next(dst, dst_base, i);
- src = mem_map_next(src, src_base, i);
- }
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
- int i;
- int nr_pages;
-
- if (PageHuge(src)) {
- /* hugetlbfs page */
- struct hstate *h = page_hstate(src);
- nr_pages = pages_per_huge_page(h);
-
- if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
- __copy_gigantic_page(dst, src, nr_pages);
- return;
- }
- } else {
- /* thp page */
- BUG_ON(!PageTransHuge(src));
- nr_pages = thp_nr_pages(src);
- }
-
- for (i = 0; i < nr_pages; i++) {
- cond_resched();
- copy_highpage(dst + i, src + i);
- }
-}
-
-/*
* Copy the page to its new location
*/
void migrate_page_states(struct page *newpage, struct page *page)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3b97e17806be..3e97e68aef7a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3820,7 +3820,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
#endif /* CONFIG_FAIL_PAGE_ALLOC */
-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return __should_fail_alloc_page(gfp_mask, order);
}
@@ -5221,9 +5221,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
unsigned int alloc_flags = ALLOC_WMARK_LOW;
int nr_populated = 0, nr_account = 0;
- if (unlikely(nr_pages <= 0))
- return 0;
-
/*
* Skip populated array elements to determine if any pages need
* to be allocated before disabling IRQs.
@@ -5231,19 +5228,35 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
while (page_array && nr_populated < nr_pages && page_array[nr_populated])
nr_populated++;
+ /* No pages requested? */
+ if (unlikely(nr_pages <= 0))
+ goto out;
+
/* Already populated array? */
if (unlikely(page_array && nr_pages - nr_populated == 0))
- return nr_populated;
+ goto out;
/* Use the single page allocator for one page. */
if (nr_pages - nr_populated == 1)
goto failed;
+#ifdef CONFIG_PAGE_OWNER
+ /*
+ * PAGE_OWNER may recurse into the allocator to allocate space to
+ * save the stack with pagesets.lock held. Releasing/reacquiring
+ * removes much of the performance benefit of bulk allocation so
+ * force the caller to allocate one page at a time: that performs
+ * similarly without adding complexity to the bulk allocator.
+ */
+ if (static_branch_unlikely(&page_owner_inited))
+ goto failed;
+#endif
+
/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
gfp &= gfp_allowed_mask;
alloc_gfp = gfp;
if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
- return 0;
+ goto out;
gfp = alloc_gfp;
/* Find an allowed local zone that meets the low watermark. */
@@ -5311,6 +5324,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+out:
return nr_populated;
failed_irq:
@@ -5326,7 +5340,7 @@ failed:
nr_populated++;
}
- return nr_populated;
+ goto out;
}
EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
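For callers, the array-based wrapper is the usual entry point; after the reordering above, a request with nr_pages <= 0 still reports how many leading slots were already populated. A usage sketch, assuming the alloc_pages_bulk_array() wrapper from <linux/gfp.h> (grab_pages() itself is hypothetical):

#include <linux/gfp.h>

/* Fill up to @nr slots of @pages; returns how many are populated. */
static unsigned long grab_pages(struct page **pages, unsigned long nr)
{
	unsigned long got = alloc_pages_bulk_array(GFP_KERNEL, nr, pages);

	/* Fall back to the single-page allocator for any shortfall,
	 * e.g. when page_owner forces one-page-at-a-time allocation. */
	while (got < nr) {
		pages[got] = alloc_page(GFP_KERNEL);
		if (!pages[got])
			break;
		got++;
	}
	return got;
}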
diff --git a/mm/rmap.c b/mm/rmap.c
index 795f9d5f8386..b9eb5c12f3fe 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1440,21 +1440,20 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/*
* If the page is mlock()d, we cannot swap it out.
*/
- if (!(flags & TTU_IGNORE_MLOCK)) {
- if (vma->vm_flags & VM_LOCKED) {
- /* PTE-mapped THP are never marked as mlocked */
- if (!PageTransCompound(page) ||
- (PageHead(page) && !PageDoubleMap(page))) {
- /*
- * Holding pte lock, we do *not* need
- * mmap_lock here
- */
- mlock_vma_page(page);
- }
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- break;
- }
+ if (!(flags & TTU_IGNORE_MLOCK) &&
+ (vma->vm_flags & VM_LOCKED)) {
+ /*
+ * PTE-mapped THP are never marked as mlocked: so do
+ * not set it on a DoubleMap THP, nor on an Anon THP
+ * (which may still be PTE-mapped after DoubleMap was
+ * cleared). But stop unmapping even in those cases.
+ */
+ if (!PageTransCompound(page) || (PageHead(page) &&
+ !PageDoubleMap(page) && !PageAnon(page)))
+ mlock_vma_page(page);
+ page_vma_mapped_walk_done(&pvmw);
+ ret = false;
+ break;
}
/* Unexpected PMD-mapped THP? */
@@ -1986,8 +1985,10 @@ static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
*/
if (vma->vm_flags & VM_LOCKED) {
/*
- * PTE-mapped THP are never marked as mlocked, but
- * this function is never called when PageDoubleMap().
+ * PTE-mapped THP are never marked as mlocked; but
+ * this function is never called on a DoubleMap THP,
+ * nor on an Anon THP (which may still be PTE-mapped
+ * after DoubleMap was cleared).
*/
mlock_vma_page(page);
/*
@@ -2022,6 +2023,10 @@ void page_mlock(struct page *page)
VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
+ /* Anon THP are only marked as mlocked when singly mapped */
+ if (PageTransCompound(page) && PageAnon(page))
+ return;
+
rmap_walk(page, &rwc);
}
diff --git a/mm/slab.h b/mm/slab.h
index 67e06637ff2e..f997fd5e42c8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -216,10 +216,18 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
+static inline bool __slub_debug_enabled(void)
+{
+ return static_branch_unlikely(&slub_debug_enabled);
+}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
+static inline bool __slub_debug_enabled(void)
+{
+ return false;
+}
#endif
/*
@@ -229,11 +237,10 @@ static inline void print_tracking(struct kmem_cache *s, void *object)
*/
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
-#ifdef CONFIG_SLUB_DEBUG
- VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
- if (static_branch_unlikely(&slub_debug_enabled))
+ if (IS_ENABLED(CONFIG_SLUB_DEBUG))
+ VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
+ if (__slub_debug_enabled())
return s->flags & flags;
-#endif
return false;
}
diff --git a/mm/slub.c b/mm/slub.c
index dc863c1ea324..090fa14628f9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -26,7 +26,6 @@
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
-#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
@@ -120,25 +119,11 @@
*/
#ifdef CONFIG_SLUB_DEBUG
-
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
-
-static inline bool __slub_debug_enabled(void)
-{
- return static_branch_unlikely(&slub_debug_enabled);
-}
-
-#else /* CONFIG_SLUB_DEBUG */
-
-static inline bool __slub_debug_enabled(void)
-{
- return false;
-}
-
#endif /* CONFIG_SLUB_DEBUG */
static inline bool kmem_cache_debug(struct kmem_cache *s)
@@ -221,8 +206,8 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
#define TRACK_ADDRS_COUNT 16
struct track {
unsigned long addr; /* Called from address */
-#ifdef CONFIG_STACKDEPOT
- depot_stack_handle_t handle;
+#ifdef CONFIG_STACKTRACE
+ unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
#endif
int cpu; /* Was running on cpu */
int pid; /* Pid context */
@@ -626,27 +611,22 @@ static struct track *get_track(struct kmem_cache *s, void *object,
return kasan_reset_tag(p + alloc);
}
-#ifdef CONFIG_STACKDEPOT
-static depot_stack_handle_t save_stack_depot_trace(gfp_t flags)
-{
- unsigned long entries[TRACK_ADDRS_COUNT];
- depot_stack_handle_t handle;
- unsigned int nr_entries;
-
- nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 4);
- handle = stack_depot_save(entries, nr_entries, flags);
- return handle;
-}
-#endif
-
static void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr)
{
struct track *p = get_track(s, object, alloc);
if (addr) {
-#ifdef CONFIG_STACKDEPOT
- p->handle = save_stack_depot_trace(GFP_NOWAIT);
+#ifdef CONFIG_STACKTRACE
+ unsigned int nr_entries;
+
+ metadata_access_enable();
+ nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+ TRACK_ADDRS_COUNT, 3);
+ metadata_access_disable();
+
+ if (nr_entries < TRACK_ADDRS_COUNT)
+ p->addrs[nr_entries] = 0;
#endif
p->addr = addr;
p->cpu = smp_processor_id();
@@ -673,19 +653,14 @@ static void print_track(const char *s, struct track *t, unsigned long pr_time)
pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
-#ifdef CONFIG_STACKDEPOT
+#ifdef CONFIG_STACKTRACE
{
- depot_stack_handle_t handle;
- unsigned long *entries;
- unsigned int nr_entries;
-
- handle = READ_ONCE(t->handle);
- if (!handle) {
- pr_err("object allocation/free stack trace missing\n");
- } else {
- nr_entries = stack_depot_fetch(handle, &entries);
- stack_trace_print(entries, nr_entries, 0);
- }
+ int i;
+ for (i = 0; i < TRACK_ADDRS_COUNT; i++)
+ if (t->addrs[i])
+ pr_err("\t%pS\n", (void *)t->addrs[i]);
+ else
+ break;
}
#endif
}
@@ -4059,26 +4034,18 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
objp = fixup_red_left(s, objp);
trackp = get_track(s, objp, TRACK_ALLOC);
kpp->kp_ret = (void *)trackp->addr;
-#ifdef CONFIG_STACKDEPOT
- {
- depot_stack_handle_t handle;
- unsigned long *entries;
- unsigned int nr_entries;
-
- handle = READ_ONCE(trackp->handle);
- if (handle) {
- nr_entries = stack_depot_fetch(handle, &entries);
- for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
- kpp->kp_stack[i] = (void *)entries[i];
- }
+#ifdef CONFIG_STACKTRACE
+ for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+ kpp->kp_stack[i] = (void *)trackp->addrs[i];
+ if (!kpp->kp_stack[i])
+ break;
+ }
- trackp = get_track(s, objp, TRACK_FREE);
- handle = READ_ONCE(trackp->handle);
- if (handle) {
- nr_entries = stack_depot_fetch(handle, &entries);
- for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
- kpp->kp_free_stack[i] = (void *)entries[i];
- }
+ trackp = get_track(s, objp, TRACK_FREE);
+ for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+ kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
+ if (!kpp->kp_free_stack[i])
+ break;
}
#endif
#endif
diff --git a/mm/util.c b/mm/util.c
index 99c6cc77de9e..9043d03750a7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -731,6 +731,16 @@ int __page_mapcount(struct page *page)
}
EXPORT_SYMBOL_GPL(__page_mapcount);
+void copy_huge_page(struct page *dst, struct page *src)
+{
+ unsigned i, nr = compound_nr(src);
+
+ for (i = 0; i < nr; i++) {
+ cond_resched();
+ copy_highpage(nth_page(dst, i), nth_page(src, i));
+ }
+}
+
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
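The consolidated copy_huge_page() above works for both hugetlbfs and THP because compound_nr() reads the page count from the compound head and nth_page() iterates by pfn where the memmap may not be virtually contiguous. The distinction nth_page() hides is roughly this (a sketch of the include/linux/mm.h definition, not guaranteed verbatim):

/* page + n is only safe when struct pages are virtually contiguous. */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page, n)	pfn_to_page(page_to_pfn((page)) + (n))
#else
#define nth_page(page, n)	((page) + (n))
#endif

That is also why the old mm/migrate.c helper needed the mem_map_next() dance for gigantic pages.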
diff --git a/net/802/garp.c b/net/802/garp.c
index 400bd857e5f5..f6012f8e59f0 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
kfree(attr);
}
+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+ struct rb_node *node, *next;
+ struct garp_attr *attr;
+
+ for (node = rb_first(&app->gid);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct garp_attr, node);
+ garp_attr_destroy(app, attr);
+ }
+}
+
static int garp_pdu_init(struct garp_applicant *app)
{
struct sk_buff *skb;
@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
spin_lock_bh(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+ garp_attr_destroy_all(app);
garp_pdu_queue(app);
spin_unlock_bh(&app->lock);
diff --git a/net/802/mrp.c b/net/802/mrp.c
index bea6e43d45a0..35e04cc5390c 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
kfree(attr);
}
+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+ struct rb_node *node, *next;
+ struct mrp_attr *attr;
+
+ for (node = rb_first(&app->mad);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct mrp_attr, node);
+ mrp_attr_destroy(app, attr);
+ }
+}
+
static int mrp_pdu_init(struct mrp_applicant *app)
{
struct sk_buff *skb;
@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
spin_lock_bh(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
+ mrp_attr_destroy_all(app);
mrp_pdu_queue(app);
spin_unlock_bh(&app->lock);
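Both teardown helpers use the save-next-before-destroy idiom because garp_attr_destroy()/mrp_attr_destroy() rb_erase() and kfree() the node the iterator is standing on. When an entire tree is being discarded, an equivalent formulation uses the kernel's postorder helper and skips rb_erase() altogether; a sketch for the GARP case (the _alt suffix marks it as hypothetical):

static void garp_attr_destroy_all_alt(struct garp_applicant *app)
{
	struct garp_attr *attr, *next;

	/* Postorder visits children before parents, so freeing in this
	 * order never touches a node after its subtree is gone. */
	rbtree_postorder_for_each_entry_safe(attr, next, &app->gid, node)
		kfree(attr);
	app->gid = RB_ROOT;
}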
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f7d2f472ae24..6e4a32354a13 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -562,7 +562,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
struct net_bridge_port *p;
int err = 0;
unsigned br_hr, dev_hr;
- bool changed_addr;
+ bool changed_addr, fdb_synced = false;
/* Don't allow bridging non-ethernet like devices. */
if ((dev->flags & IFF_LOOPBACK) ||
@@ -652,6 +652,19 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
list_add_rcu(&p->list, &br->port_list);
nbp_update_port_count(br);
+ if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
+ /* When updating the port count, we also update all ports'
+ * promiscuous mode.
+ * A port leaving promiscuous mode normally gets the bridge's
+ * fdb synced to the unicast filter (if supported); however,
+ * br_port_clear_promisc() does not distinguish between
+ * non-promiscuous ports and *new* ports, so we need to
+ * sync explicitly here.
+ */
+ fdb_synced = br_fdb_sync_static(br, p) == 0;
+ if (!fdb_synced)
+ netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
+ }
netdev_update_features(br->dev);
@@ -701,6 +714,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
return 0;
err7:
+ if (fdb_synced)
+ br_fdb_unsync_static(br, p);
list_del_rcu(&p->list);
br_fdb_delete_by_port(br, p, 0, 1);
nbp_update_port_count(br);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 53c3a9d80d9c..d0434dc8c03b 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -3264,7 +3264,9 @@ static void br_multicast_pim(struct net_bridge *br,
pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
return;
+ spin_lock(&br->multicast_lock);
br_ip4_multicast_mark_router(br, port);
+ spin_unlock(&br->multicast_lock);
}
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
@@ -3275,7 +3277,9 @@ static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
return -ENOMSG;
+ spin_lock(&br->multicast_lock);
br_ip4_multicast_mark_router(br, port);
+ spin_unlock(&br->multicast_lock);
return 0;
}
@@ -3343,7 +3347,9 @@ static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
return;
+ spin_lock(&br->multicast_lock);
br_ip6_multicast_mark_router(br, port);
+ spin_unlock(&br->multicast_lock);
}
static int br_multicast_ipv6_rcv(struct net_bridge *br,
diff --git a/net/core/dev.c b/net/core/dev.c
index c253c2aafe97..64b21f0a2048 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6008,6 +6008,19 @@ static void gro_list_prepare(const struct list_head *head,
diffs = memcmp(skb_mac_header(p),
skb_mac_header(skb),
maclen);
+
+ diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
+#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ if (!diffs) {
+ struct tc_skb_ext *skb_ext = skb_ext_find(skb, TC_SKB_EXT);
+ struct tc_skb_ext *p_ext = skb_ext_find(p, TC_SKB_EXT);
+
+ diffs |= (!!p_ext) ^ (!!skb_ext);
+ if (!diffs && unlikely(skb_ext))
+ diffs |= p_ext->chain ^ skb_ext->chain;
+ }
+#endif
+
NAPI_GRO_CB(p)->same_flow = !diffs;
}
}
@@ -6221,6 +6234,8 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi,
case GRO_MERGED_FREE:
if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
napi_skb_free_stolen_head(skb);
+ else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ __kfree_skb(skb);
else
__kfree_skb_defer(skb);
break;
@@ -6270,6 +6285,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
skb_ext_reset(skb);
+ nf_reset_ct(skb);
napi->skb = skb;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 12aabcda6db2..f63de967ac25 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -943,6 +943,7 @@ void __kfree_skb_defer(struct sk_buff *skb)
void napi_skb_free_stolen_head(struct sk_buff *skb)
{
+ nf_reset_ct(skb);
skb_dst_drop(skb);
skb_ext_put(skb);
napi_skb_cache_put(skb);
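This pairs with the napi_reuse_skb() change in net/core/dev.c: an skb whose head is recycled must not carry a stale conntrack reference into its next life. The call added here relies on a small helper; roughly (a sketch of nf_reset_ct() from <linux/skbuff.h>):

static inline void nf_reset_ct(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));	/* drop the conntrack ref */
	skb->_nfct = 0;				/* and forget the pointer */
#endif
}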
diff --git a/net/core/sock.c b/net/core/sock.c
index ba1c0f75cd45..a3eea6e0b30a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -139,6 +139,8 @@
#include <net/tcp.h>
#include <net/busy_poll.h>
+#include <linux/ethtool.h>
+
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
@@ -810,8 +812,47 @@ void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
}
}
-int sock_set_timestamping(struct sock *sk, int optname, int val)
+static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
+{
+ struct net *net = sock_net(sk);
+ struct net_device *dev = NULL;
+ bool match = false;
+ int *vclock_index;
+ int i, num;
+
+ if (sk->sk_bound_dev_if)
+ dev = dev_get_by_index(net, sk->sk_bound_dev_if);
+
+ if (!dev) {
+ pr_err("%s: sock not bind to device\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ num = ethtool_get_phc_vclocks(dev, &vclock_index);
+ for (i = 0; i < num; i++) {
+ if (*(vclock_index + i) == phc_index) {
+ match = true;
+ break;
+ }
+ }
+
+ if (num > 0)
+ kfree(vclock_index);
+
+ if (!match)
+ return -EINVAL;
+
+ sk->sk_bind_phc = phc_index;
+
+ return 0;
+}
+
+int sock_set_timestamping(struct sock *sk, int optname,
+ struct so_timestamping timestamping)
{
+ int val = timestamping.flags;
+ int ret;
+
if (val & ~SOF_TIMESTAMPING_MASK)
return -EINVAL;
@@ -832,6 +873,12 @@ int sock_set_timestamping(struct sock *sk, int optname, int val)
!(val & SOF_TIMESTAMPING_OPT_TSONLY))
return -EINVAL;
+ if (val & SOF_TIMESTAMPING_BIND_PHC) {
+ ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
+ if (ret)
+ return ret;
+ }
+
sk->sk_tsflags = val;
sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
@@ -907,6 +954,7 @@ EXPORT_SYMBOL(sock_set_mark);
int sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
+ struct so_timestamping timestamping;
struct sock_txtime sk_txtime;
struct sock *sk = sock->sk;
int val;
@@ -1068,12 +1116,22 @@ set_sndbuf:
case SO_TIMESTAMP_NEW:
case SO_TIMESTAMPNS_OLD:
case SO_TIMESTAMPNS_NEW:
- sock_set_timestamp(sk, valbool, optname);
+ sock_set_timestamp(sk, optname, valbool);
break;
case SO_TIMESTAMPING_NEW:
case SO_TIMESTAMPING_OLD:
- ret = sock_set_timestamping(sk, optname, val);
+ if (optlen == sizeof(timestamping)) {
+ if (copy_from_sockptr(&timestamping, optval,
+ sizeof(timestamping))) {
+ ret = -EFAULT;
+ break;
+ }
+ } else {
+ memset(&timestamping, 0, sizeof(timestamping));
+ timestamping.flags = val;
+ }
+ ret = sock_set_timestamping(sk, optname, timestamping);
break;
case SO_RCVLOWAT:
@@ -1201,7 +1259,7 @@ set_sndbuf:
if (val < 0)
ret = -EINVAL;
else
- sk->sk_ll_usec = val;
+ WRITE_ONCE(sk->sk_ll_usec, val);
}
break;
case SO_PREFER_BUSY_POLL:
@@ -1348,6 +1406,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
struct __kernel_old_timeval tm;
struct __kernel_sock_timeval stm;
struct sock_txtime txtime;
+ struct so_timestamping timestamping;
} v;
int lv = sizeof(int);
@@ -1451,7 +1510,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_TIMESTAMPING_OLD:
- v.val = sk->sk_tsflags;
+ lv = sizeof(v.timestamping);
+ v.timestamping.flags = sk->sk_tsflags;
+ v.timestamping.bind_phc = sk->sk_bind_phc;
break;
case SO_RCVTIMEO_OLD:
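From userspace, the extended option is exercised by passing the two-int struct so_timestamping instead of a bare flags word; old binaries that still pass an int keep working thanks to the optlen check above. A hedged usage sketch (the vclock index 1 is a placeholder, and the socket must already be bound to a device or sock_timestamping_bind_phc() returns -EOPNOTSUPP):

#include <string.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_phc_bound_timestamps(int fd)
{
	struct so_timestamping ts;

	memset(&ts, 0, sizeof(ts));
	ts.flags = SOF_TIMESTAMPING_TX_HARDWARE |
		   SOF_TIMESTAMPING_RAW_HARDWARE |
		   SOF_TIMESTAMPING_BIND_PHC;
	ts.bind_phc = 1;	/* placeholder PHC vclock index */

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
}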
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index af71b8638098..5ece05dfd8f2 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -113,11 +113,11 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
int err, port;
if (dst->index == info->tree_index && ds->index == info->sw_index &&
- ds->ops->port_bridge_join)
+ ds->ops->port_bridge_leave)
ds->ops->port_bridge_leave(ds, info->port, info->br);
if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
- ds->ops->crosschip_bridge_join)
+ ds->ops->crosschip_bridge_leave)
ds->ops->crosschip_bridge_leave(ds, info->tree_index,
info->sw_index, info->port,
info->br);
@@ -427,7 +427,7 @@ static int dsa_switch_lag_join(struct dsa_switch *ds,
info->port, info->lag,
info->info);
- return 0;
+ return -EOPNOTSUPP;
}
static int dsa_switch_lag_leave(struct dsa_switch *ds,
@@ -440,7 +440,7 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds,
return ds->ops->crosschip_lag_leave(ds, info->sw_index,
info->port, info->lag);
- return 0;
+ return -EOPNOTSUPP;
}
static int dsa_switch_mdb_add(struct dsa_switch *ds,
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index 723c9a8a8cdf..0a19470efbfb 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o
ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \
linkstate.o debug.o wol.o features.o privflags.o rings.o \
channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
- tunnels.o fec.o eeprom.o stats.o
+ tunnels.o fec.o eeprom.o stats.o phc_vclocks.o
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index f9dcbad84788..c63e0739dc6a 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -4,6 +4,7 @@
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/rtnetlink.h>
+#include <linux/ptp_clock_kernel.h>
#include "common.h"
@@ -397,6 +398,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = {
[const_ilog2(SOF_TIMESTAMPING_OPT_STATS)] = "option-stats",
[const_ilog2(SOF_TIMESTAMPING_OPT_PKTINFO)] = "option-pktinfo",
[const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw",
+ [const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc",
};
static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT);
@@ -554,6 +556,18 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
return 0;
}
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index)
+{
+ struct ethtool_ts_info info = { };
+ int num = 0;
+
+ if (!__ethtool_get_ts_info(dev, &info))
+ num = ptp_get_vclocks_index(info.phc_index, vclock_index);
+
+ return num;
+}
+EXPORT_SYMBOL(ethtool_get_phc_vclocks);
+
const struct ethtool_phy_ops *ethtool_phy_ops;
void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops)
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index a7346346114f..73e0f5b626bf 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -248,6 +248,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_TSINFO_GET] = &ethnl_tsinfo_request_ops,
[ETHTOOL_MSG_MODULE_EEPROM_GET] = &ethnl_module_eeprom_request_ops,
[ETHTOOL_MSG_STATS_GET] = &ethnl_stats_request_ops,
+ [ETHTOOL_MSG_PHC_VCLOCKS_GET] = &ethnl_phc_vclocks_request_ops,
};
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -958,6 +959,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_stats_get_policy,
.maxattr = ARRAY_SIZE(ethnl_stats_get_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ .policy = ethnl_phc_vclocks_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_phc_vclocks_get_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 3e25a47fd482..3fc395c86702 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -347,6 +347,7 @@ extern const struct ethnl_request_ops ethnl_tsinfo_request_ops;
extern const struct ethnl_request_ops ethnl_fec_request_ops;
extern const struct ethnl_request_ops ethnl_module_eeprom_request_ops;
extern const struct ethnl_request_ops ethnl_stats_request_ops;
+extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops;
extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1];
extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1];
@@ -382,6 +383,7 @@ extern const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1];
extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1];
extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1];
extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1];
+extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1];
int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/ethtool/phc_vclocks.c b/net/ethtool/phc_vclocks.c
new file mode 100644
index 000000000000..637b2f5297d5
--- /dev/null
+++ b/net/ethtool/phc_vclocks.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 NXP
+ */
+#include "netlink.h"
+#include "common.h"
+
+struct phc_vclocks_req_info {
+ struct ethnl_req_info base;
+};
+
+struct phc_vclocks_reply_data {
+ struct ethnl_reply_data base;
+ int num;
+ int *index;
+};
+
+#define PHC_VCLOCKS_REPDATA(__reply_base) \
+ container_of(__reply_base, struct phc_vclocks_reply_data, base)
+
+const struct nla_policy ethnl_phc_vclocks_get_policy[] = {
+ [ETHTOOL_A_PHC_VCLOCKS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+static int phc_vclocks_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct phc_vclocks_reply_data *data = PHC_VCLOCKS_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ data->num = ethtool_get_phc_vclocks(dev, &data->index);
+ ethnl_ops_complete(dev);
+
+ return ret;
+}
+
+static int phc_vclocks_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct phc_vclocks_reply_data *data =
+ PHC_VCLOCKS_REPDATA(reply_base);
+ int len = 0;
+
+ if (data->num > 0) {
+ len += nla_total_size(sizeof(u32));
+ len += nla_total_size(sizeof(s32) * data->num);
+ }
+
+ return len;
+}
+
+static int phc_vclocks_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct phc_vclocks_reply_data *data =
+ PHC_VCLOCKS_REPDATA(reply_base);
+
+ if (data->num <= 0)
+ return 0;
+
+ if (nla_put_u32(skb, ETHTOOL_A_PHC_VCLOCKS_NUM, data->num) ||
+ nla_put(skb, ETHTOOL_A_PHC_VCLOCKS_INDEX,
+ sizeof(s32) * data->num, data->index))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static void phc_vclocks_cleanup_data(struct ethnl_reply_data *reply_base)
+{
+ const struct phc_vclocks_reply_data *data =
+ PHC_VCLOCKS_REPDATA(reply_base);
+
+ kfree(data->index);
+}
+
+const struct ethnl_request_ops ethnl_phc_vclocks_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET,
+ .reply_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_PHC_VCLOCKS_HEADER,
+ .req_info_size = sizeof(struct phc_vclocks_req_info),
+ .reply_data_size = sizeof(struct phc_vclocks_reply_data),
+
+ .prepare_data = phc_vclocks_prepare_data,
+ .reply_size = phc_vclocks_reply_size,
+ .fill_reply = phc_vclocks_fill_reply,
+ .cleanup_data = phc_vclocks_cleanup_data,
+};
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a933bd6345b1..9fe13e4f5d08 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1376,7 +1376,7 @@ static void nl_fib_input(struct sk_buff *skb)
portid = NETLINK_CB(skb).portid; /* netlink portid */
NETLINK_CB(skb).portid = 0; /* from kernel */
NETLINK_CB(skb).dst_group = 0; /* unicast */
- netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
+ nlmsg_unicast(net->ipv4.fibnl, skb, portid);
}
static int __net_init nl_fib_lookup_init(struct net *net)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index e65f4ef024a4..ef7897226f08 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -580,10 +580,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
nlmsg_free(rep);
goto out;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
out:
if (sk)
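This conversion, repeated below for raw_diag, udp_diag and mptcp_diag, is pure deduplication: nlmsg_unicast() already performs the send-and-normalize step that each caller open-coded. The helper is roughly (a sketch of include/net/netlink.h):

static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
{
	int err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);

	return err > 0 ? 0 : err;	/* positive = bytes queued = success */
}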
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index f6cc26de5ed3..0dca00745ac3 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
}
dev->needed_headroom = t_hlen + hlen;
- mtu -= t_hlen;
+ mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0);
if (mtu < IPV4_MIN_MTU)
mtu = IPV4_MIN_MTU;
@@ -348,6 +348,9 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
t_hlen = nt->hlen + sizeof(struct iphdr);
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = IP_MAX_MTU - t_hlen;
+ if (dev->type == ARPHRD_ETHER)
+ dev->max_mtu -= dev->hard_header_len;
+
ip_tunnel_add(itn, nt);
return nt;
@@ -489,11 +492,14 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
pkt_size = skb->len - tunnel_hlen;
+ pkt_size -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
- if (df)
+ if (df) {
mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
- else
+ mtu -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
+ } else {
mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+ }
if (skb_valid_dst(skb))
skb_dst_update_pmtu_no_confirm(skb, mtu);
@@ -972,6 +978,9 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
int max_mtu = IP_MAX_MTU - t_hlen;
+ if (dev->type == ARPHRD_ETHER)
+ max_mtu -= dev->hard_header_len;
+
if (new_mtu < ETH_MIN_MTU)
return -EINVAL;
@@ -1149,6 +1158,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
if (tb[IFLA_MTU]) {
unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
+ if (dev->type == ARPHRD_ETHER)
+ max -= dev->hard_header_len;
+
mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 7b12a40dd465..2dda856ca260 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2119,7 +2119,7 @@ int ip_mr_input(struct sk_buff *skb)
raw_rcv(mroute_sk, skb);
return 0;
}
- }
+ }
}
/* already under rcu_read_lock() */
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index 1b5b8af27aaf..ccacbde30a2c 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -119,11 +119,8 @@ static int raw_diag_dump_one(struct netlink_callback *cb,
return err;
}
- err = netlink_unicast(net->diag_nlsk, rep,
- NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
return err;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d5ab5f243640..8cb44040ec68 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1375,6 +1375,9 @@ new_segment:
}
pfrag->offset += copy;
} else {
+ if (!sk_wmem_schedule(sk, copy))
+ goto wait_for_space;
+
err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
if (err == -EMSGSIZE || err == -EEXIST) {
tcp_mark_push(tp, skb);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6ca5a1f3b59..149ceb5c94ff 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4247,6 +4247,9 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
{
trace_tcp_receive_reset(sk);
+ /* mptcp can't tell us to ignore reset pkts,
+ * so just ignore the return value of mptcp_incoming_options().
+ */
if (sk_is_mptcp(sk))
mptcp_incoming_options(sk, skb);
@@ -4941,8 +4944,13 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
bool fragstolen;
int eaten;
- if (sk_is_mptcp(sk))
- mptcp_incoming_options(sk, skb);
+ /* If a subflow has been reset, the packet should not continue
+ * to be processed; drop the packet.
+ */
+ if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
+ __kfree_skb(skb);
+ return;
+ }
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
__kfree_skb(skb);
@@ -5922,8 +5930,8 @@ void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
tp->snd_cwnd_stamp = tcp_jiffies32;
- icsk->icsk_ca_initialized = 0;
bpf_skops_established(sk, bpf_op, skb);
+ /* Initialize congestion control unless BPF initialized it already: */
if (!icsk->icsk_ca_initialized)
tcp_init_congestion_control(sk);
tcp_init_buffer_space(sk);
@@ -6523,8 +6531,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_CLOSING:
case TCP_LAST_ACK:
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- if (sk_is_mptcp(sk))
- mptcp_incoming_options(sk, skb);
+ /* If a subflow has been reset, the packet should not
+ * continue to be processed; drop the packet.
+ */
+ if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb))
+ goto discard;
break;
}
fallthrough;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e66ad6bfe808..b9dc2d6197be 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -342,7 +342,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
return;
- mtu = tcp_sk(sk)->mtu_info;
+ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
dst = inet_csk_update_pmtu(sk, mtu);
if (!dst)
return;
@@ -546,7 +546,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
if (sk->sk_state == TCP_LISTEN)
goto out;
- tp->mtu_info = info;
+ WRITE_ONCE(tp->mtu_info, info);
if (!sock_owned_by_user(sk)) {
tcp_v4_mtu_reduced(sk);
} else {
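The mtu_info annotations follow the usual pattern for lockless fields: tcp_v4_err() may store from softirq context while tcp_v4_mtu_reduced() loads later, possibly on another CPU, so both sides are marked to keep the accesses single-copy-atomic and to document the data race for KCSAN. The same treatment is applied below to up->gso_size and, earlier, to sk->sk_ll_usec. The idiom in isolation (struct and functions hypothetical):

struct pmtu_box {
	u32 mtu_info;	/* written from softirq, read from process context */
};

static void pmtu_store(struct pmtu_box *b, u32 info)
{
	WRITE_ONCE(b->mtu_info, info);	/* one store, no tearing */
}

static u32 pmtu_load(struct pmtu_box *b)
{
	return READ_ONCE(b->mtu_info);	/* one load, no compiler refetch */
}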
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bde781f46b41..29553fce8502 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1732,6 +1732,7 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
return __tcp_mtu_to_mss(sk, pmtu) -
(tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
}
+EXPORT_SYMBOL(tcp_mtu_to_mss);
/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 62682807b4b2..62cd4cd52e84 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1102,7 +1102,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
ipcm_init_sk(&ipc, inet);
- ipc.gso_size = up->gso_size;
+ ipc.gso_size = READ_ONCE(up->gso_size);
if (msg->msg_controllen) {
err = udp_cmsg_send(sk, msg, &ipc.gso_size);
@@ -2695,7 +2695,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
case UDP_SEGMENT:
if (val < 0 || val > USHRT_MAX)
return -EINVAL;
- up->gso_size = val;
+ WRITE_ONCE(up->gso_size, val);
break;
case UDP_GRO:
@@ -2790,7 +2790,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
break;
case UDP_SEGMENT:
- val = up->gso_size;
+ val = READ_ONCE(up->gso_size);
break;
case UDP_GRO:
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index b2cee9a307d4..1ed8c4d78e5c 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -77,10 +77,8 @@ static int udp_dump_one(struct udp_table *tbl,
kfree_skb(rep);
goto out;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
out:
if (sk)
sock_put(sk);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 54e06b88af69..9dde1e5fb449 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -525,8 +525,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
(sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
- pp = call_gro_receive(udp_gro_receive_segment, head, skb);
- return pp;
+ return call_gro_receive(udp_gro_receive_segment, head, skb);
+
+ /* no GRO; be sure to flush the current packet */
+ goto out;
}
if (NAPI_GRO_CB(skb)->encap_mark ||
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 984050f35c61..01bea76e3891 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -60,10 +60,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
+ unsigned int hh_len = LL_RESERVED_SPACE(dev);
+ int delta = hh_len - skb_headroom(skb);
const struct in6_addr *nexthop;
struct neighbour *neigh;
int ret;
+ /* Be paranoid, rather than too clever. */
+ if (unlikely(delta > 0) && dev->header_ops) {
+ /* pskb_expand_head() might crash if skb is shared */
+ if (skb_shared(skb)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (likely(nskb)) {
+ if (skb->sk)
+ skb_set_owner_w(nskb, skb->sk);
+ consume_skb(skb);
+ } else {
+ kfree_skb(skb);
+ }
+ skb = nskb;
+ }
+ if (skb &&
+ pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+ kfree_skb(skb);
+ skb = NULL;
+ }
+ if (!skb) {
+ IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+ return -ENOMEM;
+ }
+ }
+
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
@@ -479,7 +507,9 @@ int ip6_forward(struct sk_buff *skb)
if (skb_warn_if_lro(skb))
goto drop;
- if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
+ if (!net->ipv6.devconf_all->disable_policy &&
+ !idev->cnf.disable_policy &&
+ !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 578ab6305c3f..0ce52d46e4f8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -348,11 +348,20 @@ failure:
static void tcp_v6_mtu_reduced(struct sock *sk)
{
struct dst_entry *dst;
+ u32 mtu;
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
return;
- dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
+ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
+
+ /* Drop requests trying to increase our current mss.
+ * Check done in __ip6_rt_update_pmtu() is too late.
+ */
+ if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
+ return;
+
+ dst = inet6_csk_update_pmtu(sk, mtu);
if (!dst)
return;
@@ -433,6 +442,8 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
if (type == ICMPV6_PKT_TOOBIG) {
+ u32 mtu = ntohl(info);
+
/* We are not interested in TCP_LISTEN and open_requests
* (SYN-ACKs sent out by Linux are always <576 bytes so
* they should go through unfragmented).
@@ -443,7 +454,11 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!ip6_sk_accept_pmtu(sk))
goto out;
- tp->mtu_info = ntohl(info);
+ if (mtu < IPV6_MIN_MTU)
+ goto out;
+
+ WRITE_ONCE(tp->mtu_info, mtu);
+
if (!sock_owned_by_user(sk))
tcp_v6_mtu_reduced(sk);
else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
@@ -540,7 +555,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt,
+ err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
tclass, sk->sk_priority);
rcu_read_unlock();
err = net_xmit_eval(err);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 368972dbd919..0cc7ba531b34 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1296,7 +1296,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
ipcm6_init(&ipc6);
- ipc6.gso_size = up->gso_size;
+ ipc6.gso_size = READ_ONCE(up->gso_size);
ipc6.sockc.tsflags = sk->sk_tsflags;
ipc6.sockc.mark = sk->sk_mark;
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 57fa27c1cdf9..d0d280077721 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -49,7 +49,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
- int mtu;
+ unsigned int mtu;
bool toobig;
#ifdef CONFIG_NETFILTER
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 349c6ac3313f..e6795d5a546a 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1635,14 +1635,16 @@ struct iucv_message_pending {
u8 iptype;
u32 ipmsgid;
u32 iptrgcls;
- union {
- u32 iprmmsg1_u32;
- u8 iprmmsg1[4];
- } ln1msg1;
- union {
- u32 ipbfln1f;
- u8 iprmmsg2[4];
- } ln1msg2;
+ struct {
+ union {
+ u32 iprmmsg1_u32;
+ u8 iprmmsg1[4];
+ } ln1msg1;
+ union {
+ u32 ipbfln1f;
+ u8 iprmmsg2[4];
+ } ln1msg2;
+ } rmmsg;
u32 res1[3];
u32 ipbfln2f;
u8 ippollfg;
@@ -1660,10 +1662,10 @@ static void iucv_message_pending(struct iucv_irq_data *data)
msg.id = imp->ipmsgid;
msg.class = imp->iptrgcls;
if (imp->ipflags1 & IUCV_IPRMDATA) {
- memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
+ memcpy(msg.rmmsg, &imp->rmmsg, 8);
msg.length = 8;
} else
- msg.length = imp->ln1msg2.ipbfln1f;
+ msg.length = imp->rmmsg.ln1msg2.ipbfln1f;
msg.reply_size = imp->ipbfln2f;
path->handler->message_pending(path, &msg);
}
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 52ea2517e856..ff2cc0e3273d 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -44,6 +44,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX),
SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
+ SNMP_MIB_ITEM("RcvPruned", MPTCP_MIB_RCVPRUNED),
SNMP_MIB_SENTINEL
};
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index 193466c9b549..0663cb12b448 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -37,6 +37,7 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */
MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */
MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */
+ MPTCP_MIB_RCVPRUNED, /* Incoming packet dropped due to memory limit */
__MPTCP_MIB_MAX
};
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index 8f88ddeab6a2..f48eb6315bbb 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -57,10 +57,8 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
kfree_skb(rep);
goto out;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
out:
sock_put(sk);
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index b5850afea343..4452455aef7f 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -1035,7 +1035,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
return hmac == mp_opt->ahmac;
}
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
+/* Return false if a subflow has been reset, else return true */
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
@@ -1053,12 +1054,16 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
__mptcp_check_push(subflow->conn, sk);
__mptcp_data_acked(subflow->conn);
mptcp_data_unlock(subflow->conn);
- return;
+ return true;
}
mptcp_get_options(sk, skb, &mp_opt);
+
+ /* The subflow can be in close state only if check_fully_established()
+ * just sent a reset. If so, tell the caller to ignore the current packet.
+ */
if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
- return;
+ return sk->sk_state != TCP_CLOSE;
if (mp_opt.fastclose &&
msk->local_key == mp_opt.rcvr_key) {
@@ -1100,7 +1105,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
}
if (!mp_opt.dss)
- return;
+ return true;
/* we can't wait for recvmsg() to update the ack_seq, otherwise
* monodirectional flows will get stuck
@@ -1119,12 +1124,12 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
schedule_work(&msk->work))
sock_hold(subflow->conn);
- return;
+ return true;
}
mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
if (!mpext)
- return;
+ return true;
memset(mpext, 0, sizeof(*mpext));
@@ -1153,6 +1158,8 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
if (mpext->csum_reqd)
mpext->csum = mp_opt.csum;
}
+
+ return true;
}
static void mptcp_set_rwin(const struct tcp_sock *tp)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7a5afa8c6866..a88924947815 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -474,7 +474,7 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
bool cleanup, rx_empty;
cleanup = (space > 0) && (space >= (old_space << 1));
- rx_empty = !atomic_read(&sk->sk_rmem_alloc);
+ rx_empty = !__mptcp_rmem(sk);
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
@@ -720,8 +720,10 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
sk_rbuf = ssk_rbuf;
/* over limit? can't append more skbs to msk; also, no need to wake up */
- if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
+ if (__mptcp_rmem(sk) > sk_rbuf) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
return;
+ }
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
@@ -1754,7 +1756,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
if (!(flags & MSG_PEEK)) {
/* we will bulk release the skb memory later */
skb->destructor = NULL;
- msk->rmem_released += skb->truesize;
+ WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
__skb_unlink(skb, &msk->receive_queue);
__kfree_skb(skb);
}
@@ -1873,7 +1875,7 @@ static void __mptcp_update_rmem(struct sock *sk)
atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
sk_mem_uncharge(sk, msk->rmem_released);
- msk->rmem_released = 0;
+ WRITE_ONCE(msk->rmem_released, 0);
}
static void __mptcp_splice_receive_queue(struct sock *sk)
@@ -2380,7 +2382,7 @@ static int __mptcp_init_sock(struct sock *sk)
msk->out_of_order_queue = RB_ROOT;
msk->first_pending = NULL;
msk->wmem_reserved = 0;
- msk->rmem_released = 0;
+ WRITE_ONCE(msk->rmem_released, 0);
msk->tx_pending_data = 0;
msk->first = NULL;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 426ed80fe72f..0f0c026c5f8b 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -296,9 +296,17 @@ static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
return (struct mptcp_sock *)sk;
}
+/* The msk socket doesn't use the backlog; also account for memory
+ * pending bulk release.
+ */
+static inline int __mptcp_rmem(const struct sock *sk)
+{
+ return atomic_read(&sk->sk_rmem_alloc) - READ_ONCE(mptcp_sk(sk)->rmem_released);
+}
+
static inline int __mptcp_space(const struct sock *sk)
{
- return tcp_space(sk) + READ_ONCE(mptcp_sk(sk)->rmem_released);
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk));
}
static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
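A worked example of the corrected accounting, with hypothetical numbers: suppose sk_rmem_alloc reads 256 KiB but 64 KiB of that is already-consumed data parked on the msk receive queue awaiting bulk release (rmem_released). Then __mptcp_rmem() reports 192 KiB of memory genuinely pending for the reader. mptcp_data_ready() now compares that figure, rather than raw sk_rmem_alloc, against the receive buffer, so memory that is merely waiting to be uncharged no longer triggers spurious RCVPRUNED drops; __mptcp_space() likewise derives the advertised window from the same corrected value.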
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 092d1f635d27..8c03afac5ca0 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -157,19 +157,7 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow = lock_sock_fast(ssk);
- switch (optname) {
- case SO_TIMESTAMP_OLD:
- case SO_TIMESTAMP_NEW:
- case SO_TIMESTAMPNS_OLD:
- case SO_TIMESTAMPNS_NEW:
- sock_set_timestamp(sk, optname, !!val);
- break;
- case SO_TIMESTAMPING_NEW:
- case SO_TIMESTAMPING_OLD:
- sock_set_timestamping(sk, optname, val);
- break;
- }
-
+ sock_set_timestamp(sk, optname, !!val);
unlock_sock_fast(ssk, slow);
}
@@ -178,7 +166,8 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam
}
static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
- sockptr_t optval, unsigned int optlen)
+ sockptr_t optval,
+ unsigned int optlen)
{
int val, ret;
@@ -205,14 +194,56 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
case SO_TIMESTAMP_NEW:
case SO_TIMESTAMPNS_OLD:
case SO_TIMESTAMPNS_NEW:
- case SO_TIMESTAMPING_OLD:
- case SO_TIMESTAMPING_NEW:
return mptcp_setsockopt_sol_socket_tstamp(msk, optname, val);
}
return -ENOPROTOOPT;
}
+static int mptcp_setsockopt_sol_socket_timestamping(struct mptcp_sock *msk,
+ int optname,
+ sockptr_t optval,
+ unsigned int optlen)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ struct so_timestamping timestamping;
+ int ret;
+
+ if (optlen == sizeof(timestamping)) {
+ if (copy_from_sockptr(&timestamping, optval,
+ sizeof(timestamping)))
+ return -EFAULT;
+ } else if (optlen == sizeof(int)) {
+ memset(&timestamping, 0, sizeof(timestamping));
+
+ if (copy_from_sockptr(&timestamping.flags, optval, sizeof(int)))
+ return -EFAULT;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname,
+ KERNEL_SOCKPTR(&timestamping),
+ sizeof(timestamping));
+ if (ret)
+ return ret;
+
+ lock_sock(sk);
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow = lock_sock_fast(ssk);
+
+ sock_set_timestamping(sk, optname, timestamping);
+ unlock_sock_fast(ssk, slow);
+ }
+
+ release_sock(sk);
+
+ return 0;
+}
+
static int mptcp_setsockopt_sol_socket_linger(struct mptcp_sock *msk, sockptr_t optval,
unsigned int optlen)
{
@@ -299,9 +330,12 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
case SO_TIMESTAMP_NEW:
case SO_TIMESTAMPNS_OLD:
case SO_TIMESTAMPNS_NEW:
+ return mptcp_setsockopt_sol_socket_int(msk, optname, optval,
+ optlen);
case SO_TIMESTAMPING_OLD:
case SO_TIMESTAMPING_NEW:
- return mptcp_setsockopt_sol_socket_int(msk, optname, optval, optlen);
+ return mptcp_setsockopt_sol_socket_timestamping(msk, optname,
+ optval, optlen);
case SO_LINGER:
return mptcp_setsockopt_sol_socket_linger(msk, optval, optlen);
case SO_RCVLOWAT:
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 66d0b1893d26..966f777d35ce 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -214,11 +214,6 @@ again:
ntohs(inet_sk(sk_listener)->inet_sport),
ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
- sock_put((struct sock *)subflow_req->msk);
- mptcp_token_destroy_request(req);
- tcp_request_sock_ops.destructor(req);
- subflow_req->msk = NULL;
- subflow_req->mp_join = 0;
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
return -EPERM;
}
@@ -230,6 +225,8 @@ again:
if (unlikely(req->syncookie)) {
if (mptcp_can_accept_new_subflow(subflow_req->msk))
subflow_init_req_cookie_join_save(subflow_req, skb);
+ else
+ return -EPERM;
}
pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
@@ -269,9 +266,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
return -EINVAL;
- if (mptcp_can_accept_new_subflow(subflow_req->msk))
- subflow_req->mp_join = 1;
-
+ subflow_req->mp_join = 1;
subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
}
diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c
index abe0fd099746..37127781aee9 100644
--- a/net/mptcp/syncookies.c
+++ b/net/mptcp/syncookies.c
@@ -37,7 +37,21 @@ static spinlock_t join_entry_locks[COOKIE_JOIN_SLOTS] __cacheline_aligned_in_smp
static u32 mptcp_join_entry_hash(struct sk_buff *skb, struct net *net)
{
- u32 i = skb_get_hash(skb) ^ net_hash_mix(net);
+ static u32 mptcp_join_hash_secret __read_mostly;
+ struct tcphdr *th = tcp_hdr(skb);
+ u32 seq, i;
+
+ net_get_random_once(&mptcp_join_hash_secret,
+ sizeof(mptcp_join_hash_secret));
+
+ if (th->syn)
+ seq = TCP_SKB_CB(skb)->seq;
+ else
+ seq = TCP_SKB_CB(skb)->seq - 1;
+
+ i = jhash_3words(seq, net_hash_mix(net),
+ (__force __u32)th->source << 16 | (__force __u32)th->dest,
+ mptcp_join_hash_secret);
return i % ARRAY_SIZE(join_entries);
}
diff --git a/net/ncsi/Kconfig b/net/ncsi/Kconfig
index 93309081f5a4..ea1dd32b6b1f 100644
--- a/net/ncsi/Kconfig
+++ b/net/ncsi/Kconfig
@@ -17,3 +17,9 @@ config NCSI_OEM_CMD_GET_MAC
help
This allows getting the MAC address from NCSI firmware and setting it
back to the controller.
+config NCSI_OEM_CMD_KEEP_PHY
+ bool "Keep PHY Link up"
+ depends on NET_NCSI
+ help
+ This allows keeping the PHY link up and prevents channel resets during
+ host load.
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index cbbb0de4750a..0b6cfd3b31e0 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -78,6 +78,9 @@ enum {
/* OEM Vendor Manufacture ID */
#define NCSI_OEM_MFR_MLX_ID 0x8119
#define NCSI_OEM_MFR_BCM_ID 0x113d
+#define NCSI_OEM_MFR_INTEL_ID 0x157
+/* Intel specific OEM command */
+#define NCSI_OEM_INTEL_CMD_KEEP_PHY 0x20 /* CMD ID for Keep PHY up */
/* Broadcom specific OEM Command */
#define NCSI_OEM_BCM_CMD_GMA 0x01 /* CMD ID for Get MAC */
/* Mellanox specific OEM Command */
@@ -86,6 +89,7 @@ enum {
#define NCSI_OEM_MLX_CMD_SMAF 0x01 /* CMD ID for Set MC Affinity */
#define NCSI_OEM_MLX_CMD_SMAF_PARAM 0x07 /* Parameter for SMAF */
/* OEM Command payload lengths */
+#define NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN 7
#define NCSI_OEM_BCM_CMD_GMA_LEN 12
#define NCSI_OEM_MLX_CMD_GMA_LEN 8
#define NCSI_OEM_MLX_CMD_SMAF_LEN 60
@@ -271,6 +275,7 @@ enum {
ncsi_dev_state_probe_mlx_gma,
ncsi_dev_state_probe_mlx_smaf,
ncsi_dev_state_probe_cis,
+ ncsi_dev_state_probe_keep_phy,
ncsi_dev_state_probe_gvi,
ncsi_dev_state_probe_gc,
ncsi_dev_state_probe_gls,
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index ca04b6df1341..89c7742cd72e 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -689,6 +689,35 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
return 0;
}
+#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+
+static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
+{
+ unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
+ int ret = 0;
+
+ nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
+
+ memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
+ *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
+
+ data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
+
+ /* PHY Link up attribute */
+ data[6] = 0x1;
+
+ nca->data = data;
+
+ ret = ncsi_xmit_cmd(nca);
+ if (ret)
+ netdev_err(nca->ndp->ndev.dev,
+ "NCSI: Failed to transmit cmd 0x%x during configure\n",
+ nca->type);
+ return ret;
+}
+
+#endif
+
#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
/* NCSI OEM Command APIs */
@@ -700,7 +729,7 @@ static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
- *(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
+ *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
data[5] = NCSI_OEM_BCM_CMD_GMA;
nca->data = data;
@@ -724,7 +753,7 @@ static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
memset(&u, 0, sizeof(u));
- u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
+ u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
@@ -747,7 +776,7 @@ static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
int ret = 0;
memset(&u, 0, sizeof(u));
- u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
+ u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
@@ -1392,7 +1421,23 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
}
nd->state = ncsi_dev_state_probe_gvi;
+ if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
+ nd->state = ncsi_dev_state_probe_keep_phy;
+ break;
+#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+ case ncsi_dev_state_probe_keep_phy:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_OEM;
+ nca.package = ndp->active_package->id;
+ nca.channel = 0;
+ ret = ncsi_oem_keep_phy_intel(&nca);
+ if (ret)
+ goto error;
+
+ nd->state = ncsi_dev_state_probe_gvi;
break;
+#endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */
case ncsi_dev_state_probe_gvi:
case ncsi_dev_state_probe_gc:
case ncsi_dev_state_probe_gls:
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 888ccc2d4e34..d48374894817 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -403,7 +403,7 @@ static int ncsi_rsp_handler_ev(struct ncsi_request *nr)
/* Update to VLAN mode */
cmd = (struct ncsi_cmd_ev_pkt *)skb_network_header(nr->cmd);
ncm->enable = 1;
- ncm->data[0] = ntohl(cmd->mode);
+ ncm->data[0] = ntohl((__force __be32)cmd->mode);
return 0;
}
@@ -699,12 +699,19 @@ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
return 0;
}
+/* Response handler for Intel card */
+static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr)
+{
+ return 0;
+}
+
static struct ncsi_rsp_oem_handler {
unsigned int mfr_id;
int (*handler)(struct ncsi_request *nr);
} ncsi_rsp_oem_handlers[] = {
{ NCSI_OEM_MFR_MLX_ID, ncsi_rsp_handler_oem_mlx },
- { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm }
+ { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm },
+ { NCSI_OEM_MFR_INTEL_ID, ncsi_rsp_handler_oem_intel }
};
/* Response handler for OEM command */
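For orientation, the 7-byte payload built in ncsi_oem_keep_phy_intel() above lays out as sketched below; this is an equivalent construction, not the patch's code (put_unaligned_be32() from asm/unaligned.h produces the same bytes as the ntohl() store used in the patch):

/* bytes 0-3: NCSI_OEM_MFR_INTEL_ID (0x157), big-endian
 * byte 4:    NCSI_OEM_INTEL_CMD_KEEP_PHY (0x20)
 * byte 5:    reserved, left zero
 * byte 6:    0x1, the "keep PHY link up" attribute
 */
unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN] = { 0 };

put_unaligned_be32(NCSI_OEM_MFR_INTEL_ID, data);
data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
data[6] = 0x1;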
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 96ba19fc8155..83c52df85870 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -149,7 +149,15 @@ static void nf_conntrack_all_lock(void)
spin_lock(&nf_conntrack_locks_all_lock);
- nf_conntrack_locks_all = true;
+ /* For nf_conntrack_locks_all, only the latest time when another
+ * CPU will see an update is controlled by the "release" of the
+ * spin_lock below.
+ * The earliest time is not controlled, and thus KCSAN could detect
+ * a race when nf_conntrack_lock() reads the variable.
+ * WRITE_ONCE() is used to ensure the compiler will not
+ * optimize the write.
+ */
+ WRITE_ONCE(nf_conntrack_locks_all, true);
for (i = 0; i < CONNTRACK_LOCKS; i++) {
spin_lock(&nf_conntrack_locks[i]);
@@ -2457,7 +2465,6 @@ i_see_dead_people:
}
list_for_each_entry(net, net_exit_list, exit_list) {
- nf_conntrack_proto_pernet_fini(net);
nf_conntrack_ecache_pernet_fini(net);
nf_conntrack_expect_pernet_fini(net);
free_percpu(net->ct.stat);
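The matching read side is outside this hunk; assuming it uses a marked load, the publish/observe pattern being followed is (names invented for illustration):

static bool flag;
static DEFINE_SPINLOCK(flag_lock);

static void publisher(void)
{
        spin_lock(&flag_lock);
        WRITE_ONCE(flag, true);         /* unlock bounds the latest visibility */
        spin_unlock(&flag_lock);
}

static bool reader(void)
{
        return READ_ONCE(flag);         /* a plain load here is what KCSAN flags */
}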
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 4e1a9dba7077..e81af33b233b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -218,6 +218,7 @@ static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
if (!help)
return 0;
+ rcu_read_lock();
helper = rcu_dereference(help->helper);
if (!helper)
goto out;
@@ -233,9 +234,11 @@ static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
nla_nest_end(skb, nest_helper);
out:
+ rcu_read_unlock();
return 0;
nla_put_failure:
+ rcu_read_unlock();
return -1;
}
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 55647409a9be..8f7a9837349c 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -697,13 +697,6 @@ void nf_conntrack_proto_pernet_init(struct net *net)
#endif
}
-void nf_conntrack_proto_pernet_fini(struct net *net)
-{
-#ifdef CONFIG_NF_CT_PROTO_GRE
- nf_ct_gre_keymap_flush(net);
-#endif
-}
-
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
&nf_conntrack_htable_size, 0600);
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index db11e403d818..728eeb0aea87 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -55,19 +55,6 @@ static inline struct nf_gre_net *gre_pernet(struct net *net)
return &net->ct.nf_ct_proto.gre;
}
-void nf_ct_gre_keymap_flush(struct net *net)
-{
- struct nf_gre_net *net_gre = gre_pernet(net);
- struct nf_ct_gre_keymap *km, *tmp;
-
- spin_lock_bh(&keymap_lock);
- list_for_each_entry_safe(km, tmp, &net_gre->keymap_list, list) {
- list_del_rcu(&km->list);
- kfree_rcu(km, rcu);
- }
- spin_unlock_bh(&keymap_lock);
-}
-
static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
const struct nf_conntrack_tuple *t)
{
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index f7e8baf59b51..3259416f2ea4 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -823,6 +823,22 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
}
+static bool tcp_can_early_drop(const struct nf_conn *ct)
+{
+ switch (ct->proto.tcp.state) {
+ case TCP_CONNTRACK_FIN_WAIT:
+ case TCP_CONNTRACK_LAST_ACK:
+ case TCP_CONNTRACK_TIME_WAIT:
+ case TCP_CONNTRACK_CLOSE:
+ case TCP_CONNTRACK_CLOSE_WAIT:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
/* Returns verdict for packet, or -1 for invalid. */
int nf_conntrack_tcp_packet(struct nf_conn *ct,
struct sk_buff *skb,
@@ -1030,10 +1046,30 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
if (index != TCP_RST_SET)
break;
- if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
+ /* If we are closing, the tuple might have been re-used already.
+ * last_index, last_ack, and all other ct fields used for
+ * sequence/window validation are outdated in that case.
+ *
+ * As the conntrack can already be expired by GC under pressure,
+ * just skip validation checks.
+ */
+ if (tcp_can_early_drop(ct))
+ goto in_window;
+
+ /* td_maxack might be outdated if we let a SYN through earlier */
+ if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
+ ct->proto.tcp.last_index != TCP_SYN_SET) {
u32 seq = ntohl(th->seq);
- if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
+ /* If we are not in established state and SEQ=0 this is most
+ * likely an answer to a SYN we let go through above (last_index
+ * can be updated due to out-of-order ACKs).
+ */
+ if (seq == 0 && !nf_conntrack_tcp_established(ct))
+ break;
+
+ if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
+ !tn->tcp_ignore_invalid_rst) {
/* Invalid RST */
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
@@ -1134,6 +1170,16 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_ACCEPT;
}
+
+ if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
+ /* do not renew timeout on SYN retransmit.
+ *
+ * Else port reuse by client or NAT middlebox can keep
+ * entry alive indefinitely (including nat info).
+ */
+ return NF_ACCEPT;
+ }
+
/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
* pickup with loose=1. Avoid large ESTABLISHED timeout.
*/
@@ -1155,22 +1201,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-static bool tcp_can_early_drop(const struct nf_conn *ct)
-{
- switch (ct->proto.tcp.state) {
- case TCP_CONNTRACK_FIN_WAIT:
- case TCP_CONNTRACK_LAST_ACK:
- case TCP_CONNTRACK_TIME_WAIT:
- case TCP_CONNTRACK_CLOSE:
- case TCP_CONNTRACK_CLOSE_WAIT:
- return true;
- default:
- break;
- }
-
- return false;
-}
-
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
@@ -1437,6 +1467,9 @@ void nf_conntrack_tcp_init_net(struct net *net)
*/
tn->tcp_be_liberal = 0;
+ /* If it's non-zero, we turn off the RST sequence number check */
+ tn->tcp_ignore_invalid_rst = 0;
+
/* Max number of the retransmitted packets without receiving an (acceptable)
* ACK from the destination. If this number is reached, a shorter timer
* will be started.
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f57a951c9b5e..214d9f9e499b 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -579,6 +579,7 @@ enum nf_ct_sysctl_index {
#endif
NF_SYSCTL_CT_PROTO_TCP_LOOSE,
NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
+ NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST,
NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
@@ -798,6 +799,14 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
+ [NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = {
+ .procname = "nf_conntrack_tcp_ignore_invalid_rst",
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
[NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = {
.procname = "nf_conntrack_tcp_max_retrans",
.maxlen = sizeof(u8),
@@ -1004,6 +1013,7 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
XASSIGN(LOOSE, &tn->tcp_loose);
XASSIGN(LIBERAL, &tn->tcp_be_liberal);
XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
+ XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst);
#undef XASSIGN
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
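Once this lands, the check can be relaxed per network namespace by writing 1 to /proc/sys/net/netfilter/nf_conntrack_tcp_ignore_invalid_rst (assuming the usual net.netfilter sysctl layout); the u8 handler above clamps accepted values to 0 and 1.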
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 390d4466567f..de182d1f7c4e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3446,7 +3446,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
return 0;
err_destroy_flow_rule:
- nft_flow_rule_destroy(flow);
+ if (flow)
+ nft_flow_rule_destroy(flow);
err_release_rule:
nf_tables_rule_release(&ctx, rule);
err_release_expr:
diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
index 913ac45167f2..8088b99f2ee3 100644
--- a/net/netfilter/nft_last.c
+++ b/net/netfilter/nft_last.c
@@ -23,15 +23,21 @@ static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
{
struct nft_last_priv *priv = nft_expr_priv(expr);
u64 last_jiffies;
+ u32 last_set = 0;
int err;
- if (tb[NFTA_LAST_MSECS]) {
+ if (tb[NFTA_LAST_SET]) {
+ last_set = ntohl(nla_get_be32(tb[NFTA_LAST_SET]));
+ if (last_set == 1)
+ priv->last_set = 1;
+ }
+
+ if (last_set && tb[NFTA_LAST_MSECS]) {
err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies);
if (err < 0)
return err;
- priv->last_jiffies = jiffies + (unsigned long)last_jiffies;
- priv->last_set = 1;
+ priv->last_jiffies = jiffies - (unsigned long)last_jiffies;
}
return 0;
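Presumably NFTA_LAST_MSECS carries "how long ago the last match happened", which explains both halves of the fix: restoring must move backwards from now (an expression dumped as last seen 5000 ms ago comes back as jiffies - msecs_to_jiffies(5000), where the old code added the delta and pushed the timestamp into the future), and last_set must only be raised when userspace actually sent NFTA_LAST_SET.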
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d233ac4a91b6..380f95aacdec 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2471,7 +2471,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
nlmsg_end(skb, rep);
- netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
+ nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid);
}
EXPORT_SYMBOL(netlink_ack);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c89c8da99f1a..d4a2db0b2299 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -670,13 +670,13 @@ static bool cmp_key(const struct sw_flow_key *key1,
{
const long *cp1 = (const long *)((const u8 *)key1 + key_start);
const long *cp2 = (const long *)((const u8 *)key2 + key_start);
- long diffs = 0;
int i;
for (i = key_start; i < key_end; i += sizeof(long))
- diffs |= *cp1++ ^ *cp2++;
+ if (*cp1++ ^ *cp2++)
+ return false;
- return diffs == 0;
+ return true;
}
static bool flow_cmp_masked_key(const struct sw_flow *flow,
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index a656baa321fe..1b4b3514c94f 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -322,11 +322,22 @@ err_alloc:
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
+ struct flow_block_cb *block_cb, *tmp_cb;
struct tcf_ct_flow_table *ct_ft;
+ struct flow_block *block;
ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
rwork);
nf_flow_table_free(&ct_ft->nf_ft);
+
+ /* Remove any remaining callbacks before cleanup */
+ block = &ct_ft->nf_ft.flow_block;
+ down_write(&ct_ft->nf_ft.flow_block_lock);
+ list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
+ list_del(&block_cb->list);
+ flow_block_cb_free(block_cb);
+ }
+ up_write(&ct_ft->nf_ft.flow_block_lock);
kfree(ct_ft);
module_put(THIS_MODULE);
@@ -1026,7 +1037,8 @@ do_nat:
/* This will take care of sending queued events
* even if the connection is already confirmed.
*/
- nf_conntrack_confirm(skb);
+ if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+ goto drop;
}
if (!skip_add)
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 66fe2b82af9a..07b30d0601d7 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -564,7 +564,7 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
/* if there's no entry, it means that the schedule didn't
* start yet, so force all gates to be open, this is in
* accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
- * "AdminGateSates"
+ * "AdminGateStates"
*/
gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 493fc01e5d2b..760b367644c1 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -284,10 +284,8 @@ static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
goto out;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
out:
return err;
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 3c1fbf38f4f7..ec0f52567c16 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -398,7 +398,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
retval = SCTP_SCOPE_LINK;
} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
- ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+ ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
retval = SCTP_SCOPE_PRIVATE;
} else {
retval = SCTP_SCOPE_GLOBAL;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 6c08e5048d38..b8fa8f1a7277 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1163,7 +1163,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
const struct sctp_transport *transport,
__u32 probe_size)
{
- struct sctp_sender_hb_info hbinfo;
+ struct sctp_sender_hb_info hbinfo = {};
struct sctp_chunk *retval;
retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 5f23804f21c7..397a6244dd97 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -335,10 +335,13 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
sctp_assoc_sync_pmtu(t->asoc);
}
- } else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) {
- /* Raise probe_size again after 30 * interval in Search Complete */
- t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
- t->pl.probe_size += SCTP_PL_MIN_STEP;
+ } else if (t->pl.state == SCTP_PL_COMPLETE) {
+ t->pl.raise_count++;
+ if (t->pl.raise_count == 30) {
+ /* Raise probe_size again after 30 * interval in Search Complete */
+ t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
+ t->pl.probe_size += SCTP_PL_MIN_STEP;
+ }
}
}
diff --git a/net/socket.c b/net/socket.c
index bd9233da2497..0b2dad3bdf7f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -104,6 +104,7 @@
#include <linux/sockios.h>
#include <net/busy_poll.h>
#include <linux/errqueue.h>
+#include <linux/ptp_clock_kernel.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
@@ -873,12 +874,18 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
empty = 0;
if (shhwtstamps &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
- !skb_is_swtx_tstamp(skb, false_tstamp) &&
- ktime_to_timespec64_cond(shhwtstamps->hwtstamp, tss.ts + 2)) {
- empty = 0;
- if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
- !skb_is_err_queue(skb))
- put_ts_pktinfo(msg, skb);
+ !skb_is_swtx_tstamp(skb, false_tstamp)) {
+ if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC)
+ ptp_convert_timestamp(shhwtstamps, sk->sk_bind_phc);
+
+ if (ktime_to_timespec64_cond(shhwtstamps->hwtstamp,
+ tss.ts + 2)) {
+ empty = 0;
+
+ if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
+ !skb_is_err_queue(skb))
+ put_ts_pktinfo(msg, skb);
+ }
}
if (!empty) {
if (sock_flag(sk, SOCK_TSTAMP_NEW))
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 9ff64f9df1f3..7e7d7f45685a 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -295,10 +295,8 @@ again:
goto again;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
out:
if (sk)
sock_put(sk);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 520434ea966f..036998d11ded 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -331,6 +331,7 @@ $(obj)/%.o: $(src)/%.c
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+ -fno-asynchronous-unwind-tables \
-I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
-O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \
$(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 53e300f860bb..33d0bdebbed8 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -96,6 +96,7 @@ static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static u32 opt_num_xsks = 1;
+static u32 prog_id;
static bool opt_busy_poll;
static bool opt_reduced_cap;
@@ -461,6 +462,23 @@ static void *poller(void *arg)
return NULL;
}
+static void remove_xdp_program(void)
+{
+ u32 curr_prog_id = 0;
+
+ if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on a given interface\n");
+ else
+ printf("program on interface changed, not removing\n");
+}
+
static void int_exit(int sig)
{
benchmark_done = true;
@@ -471,6 +489,9 @@ static void __exit_with_error(int error, const char *file, const char *func,
{
fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
line, error, strerror(error));
+
+ if (opt_num_xsks > 1)
+ remove_xdp_program();
exit(EXIT_FAILURE);
}
@@ -490,6 +511,9 @@ static void xdpsock_cleanup(void)
if (write(sock, &cmd, sizeof(int)) < 0)
exit_with_error(errno);
}
+
+ if (opt_num_xsks > 1)
+ remove_xdp_program();
}
static void swap_mac_addresses(void *data)
@@ -857,6 +881,10 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
if (ret)
exit_with_error(-ret);
+ ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
+ if (ret)
+ exit_with_error(-ret);
+
xsk->app_stats.rx_empty_polls = 0;
xsk->app_stats.fill_fail_polls = 0;
xsk->app_stats.copy_tx_sendtos = 0;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 10b2f2380d6f..02197cb8e3a7 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -386,7 +386,7 @@ ifeq ($(CONFIG_LTO_CLANG) $(CONFIG_MODVERSIONS),y y)
cmd_update_lto_symversions = \
rm -f $@.symversions \
$(foreach n, $(filter-out FORCE,$^), \
- $(if $(wildcard $(n).symversions), \
+ $(if $(shell test -s $(n).symversions && echo y), \
; cat $(n).symversions >> $@.symversions))
else
cmd_update_lto_symversions = echo >/dev/null
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 151f04971faa..6b54e46a0f12 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -131,11 +131,14 @@ res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}"
if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then
# full scm version string
res="$res$(scm_version)"
-elif [ -z "${LOCALVERSION}" ]; then
- # append a plus sign if the repository is not in a clean
- # annotated or signed tagged state (as git describe only
- # looks at signed or annotated tags - git tag -a/-s) and
- # LOCALVERSION= is not specified
+elif [ "${LOCALVERSION+set}" != "set" ]; then
+ # If the variable LOCALVERSION is not set, append a plus
+ # sign if the repository is not in a clean annotated or
+ # signed tagged state (as git describe only looks at signed
+ # or annotated tags - git tag -a/-s).
+ #
+ # If the variable LOCALVERSION is set (including being set
+ # to an empty string), we don't want to append a plus sign.
scm=$(scm_version --short)
res="$res${scm:++}"
fi
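Concretely: with LOCALVERSION unset, a dirty tree still gets the plus suffix, while passing LOCALVERSION= (set but empty) on the make command line now suppresses it; before this change the empty string and the unset case behaved identically.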
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 3e784cf9f401..ebd06ae642c9 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -44,7 +44,7 @@ def read_spdxdata(repo):
continue
exception = None
- for l in open(el.path).readlines():
+ for l in open(el.path, encoding="utf-8").readlines():
if l.startswith('Valid-License-Identifier:'):
lid = l.split(':')[1].strip().upper()
if lid in spdx.licenses:
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-adda.c b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
index 2b758a18c2ea..5b8a274419ed 100644
--- a/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
@@ -341,6 +341,7 @@ static int set_mtkaif_rx(struct mtk_base_afe *afe)
case MT8183_MTKAIF_PROTOCOL_1:
regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x31);
regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0, 0x0);
+ break;
default:
break;
}
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index f83a70e07df8..ce2ee8f1e361 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -20,5 +20,6 @@
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
#define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_MEMFD_SECRET
#include <asm-generic/unistd.h>
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 39bb322707b4..b11cfc86a3d0 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -97,7 +97,7 @@ clean: bpftool_clean runqslower_clean resolve_btfids_clean
$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf
$(Q)$(RM) -r -- $(OUTPUT)feature
-install: $(PROGS) bpftool_install runqslower_install
+install: $(PROGS) bpftool_install
$(call QUIET_INSTALL, bpf_jit_disasm)
$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin
$(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm
@@ -118,9 +118,6 @@ bpftool_clean:
runqslower:
$(call descend,runqslower)
-runqslower_install:
- $(call descend,runqslower,install)
-
runqslower_clean:
$(call descend,runqslower,clean)
@@ -131,5 +128,5 @@ resolve_btfids_clean:
$(call descend,resolve_btfids,clean)
.PHONY: all install clean bpftool bpftool_install bpftool_clean \
- runqslower runqslower_install runqslower_clean \
+ runqslower runqslower_clean \
resolve_btfids resolve_btfids_clean
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index e7e7eee9f172..24734f2249d6 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -43,11 +43,13 @@ static int fprintf_json(void *out, const char *fmt, ...)
{
va_list ap;
char *s;
+ int err;
va_start(ap, fmt);
- if (vasprintf(&s, fmt, ap) < 0)
- return -1;
+ err = vasprintf(&s, fmt, ap);
va_end(ap);
+ if (err < 0)
+ return -1;
if (!oper_count) {
int i;
diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c
index 645530ca7e98..ab9353f2fd46 100644
--- a/tools/bpf/runqslower/runqslower.bpf.c
+++ b/tools/bpf/runqslower/runqslower.bpf.c
@@ -74,7 +74,7 @@ int handle__sched_switch(u64 *ctx)
u32 pid;
/* ivcsw: treat like an enqueue event and store timestamp */
- if (prev->state == TASK_RUNNING)
+ if (prev->__state == TASK_RUNNING)
trace_enqueue(prev);
pid = next->pid;
diff --git a/tools/include/linux/kconfig.h b/tools/include/linux/kconfig.h
index 1555a0c4f345..13b86bd3b746 100644
--- a/tools/include/linux/kconfig.h
+++ b/tools/include/linux/kconfig.h
@@ -4,12 +4,6 @@
/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define __BIG_ENDIAN 4321
-#else
-#define __LITTLE_ENDIAN 1234
-#endif
-
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index f211961ce1da..a9d6fcd95f42 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -873,8 +873,13 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
#define __NR_landlock_restrict_self 446
__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
+#ifdef __ARCH_WANT_MEMFD_SECRET
+#define __NR_memfd_secret 447
+__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
+#endif
+
#undef __NR_syscalls
-#define __NR_syscalls 447
+#define __NR_syscalls 448
/*
* 32 bit systems traditionally used different
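A minimal user-space probe for the newly wired syscall, as a sketch (it assumes only the number 447 added above and passes no flags):

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_memfd_secret
#define __NR_memfd_secret 447
#endif

int main(void)
{
        /* Returns an fd on success; -1 with errno ENOSYS on older kernels */
        long fd = syscall(__NR_memfd_secret, 0);

        return fd < 0 ? 1 : 0;
}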
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 1e04ce724240..6f5e2757bb3c 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10136,7 +10136,7 @@ int bpf_link__unpin(struct bpf_link *link)
err = unlink(link->pin_path);
if (err != 0)
- return libbpf_err_errno(err);
+ return -errno;
pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
zfree(&link->pin_path);
@@ -11197,7 +11197,7 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
if (cnt < 0)
- return libbpf_err_errno(cnt);
+ return -errno;
for (i = 0; i < cnt; i++) {
struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index af973e400053..f6b57799c1ea 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -368,6 +368,7 @@
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+447 common memfd_secret sys_memfd_secret
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 5d6f583e2cd3..c88c61e7f8cc 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -361,9 +361,10 @@ static struct dso *findnew_dso(int pid, int tid, const char *filename,
dso = machine__findnew_dso_id(machine, filename, id);
}
- if (dso)
+ if (dso) {
+ nsinfo__put(dso->nsinfo);
dso->nsinfo = nsi;
- else
+ } else
nsinfo__put(nsi);
thread__put(thread);
@@ -992,8 +993,10 @@ int cmd_inject(int argc, const char **argv)
data.path = inject.input_name;
inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
- if (IS_ERR(inject.session))
- return PTR_ERR(inject.session);
+ if (IS_ERR(inject.session)) {
+ ret = PTR_ERR(inject.session);
+ goto out_close_output;
+ }
if (zstd_init(&(inject.session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed.\n");
@@ -1035,6 +1038,8 @@ int cmd_inject(int argc, const char **argv)
out_delete:
zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
+out_close_output:
+ perf_data__close(&inject.output);
free(inject.itrace_synth_opts.vm_tm_corr_args);
return ret;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 6386af6a2612..dc0364f671b9 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1175,6 +1175,8 @@ int cmd_report(int argc, const char **argv)
.annotation_opts = annotation__default_options,
.skip_empty = true,
};
+ char *sort_order_help = sort_help("sort by key(s):");
+ char *field_order_help = sort_help("output field(s): overhead period sample ");
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
@@ -1209,9 +1211,9 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN(0, "header-only", &report.header_only,
"Show only data header."),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- sort_help("sort by key(s):")),
+ sort_order_help),
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
- sort_help("output field(s): overhead period sample ")),
+ field_order_help),
OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes"),
OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1344,11 +1346,11 @@ int cmd_report(int argc, const char **argv)
char sort_tmp[128];
if (ret < 0)
- return ret;
+ goto exit;
ret = perf_config(report__config, &report);
if (ret)
- return ret;
+ goto exit;
argc = parse_options(argc, argv, options, report_usage, 0);
if (argc) {
@@ -1362,8 +1364,10 @@ int cmd_report(int argc, const char **argv)
report.symbol_filter_str = argv[0];
}
- if (annotate_check_args(&report.annotation_opts) < 0)
- return -EINVAL;
+ if (annotate_check_args(&report.annotation_opts) < 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
if (report.mmaps_mode)
report.tasks_mode = true;
@@ -1377,12 +1381,14 @@ int cmd_report(int argc, const char **argv)
if (symbol_conf.vmlinux_name &&
access(symbol_conf.vmlinux_name, R_OK)) {
pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
if (symbol_conf.kallsyms_name &&
access(symbol_conf.kallsyms_name, R_OK)) {
pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
if (report.inverted_callchain)
@@ -1406,12 +1412,14 @@ int cmd_report(int argc, const char **argv)
repeat:
session = perf_session__new(&data, false, &report.tool);
- if (IS_ERR(session))
- return PTR_ERR(session);
+ if (IS_ERR(session)) {
+ ret = PTR_ERR(session);
+ goto exit;
+ }
ret = evswitch__init(&report.evswitch, session->evlist, stderr);
if (ret)
- return ret;
+ goto exit;
if (zstd_init(&(session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
@@ -1646,5 +1654,8 @@ error:
zstd_fini(&(session->zstd_data));
perf_session__delete(session);
+exit:
+ free(sort_order_help);
+ free(field_order_help);
return ret;
}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 954ce2f594e9..1ff10d4bccf3 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -670,7 +670,7 @@ static void create_tasks(struct perf_sched *sched)
err = pthread_attr_init(&attr);
BUG_ON(err);
err = pthread_attr_setstacksize(&attr,
- (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
+ (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
BUG_ON(err);
err = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(err);
@@ -3335,6 +3335,16 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options
sort_dimension__add("pid", &sched->cmp_pid);
}
+static bool schedstat_events_exposed(void)
+{
+ /*
+ * Select "sched:sched_stat_wait" event to check
+ * whether schedstat tracepoints are exposed.
+ */
+ return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
+ false : true;
+}
+
static int __cmd_record(int argc, const char **argv)
{
unsigned int rec_argc, i, j;
@@ -3346,21 +3356,33 @@ static int __cmd_record(int argc, const char **argv)
"-m", "1024",
"-c", "1",
"-e", "sched:sched_switch",
- "-e", "sched:sched_stat_wait",
- "-e", "sched:sched_stat_sleep",
- "-e", "sched:sched_stat_iowait",
"-e", "sched:sched_stat_runtime",
"-e", "sched:sched_process_fork",
"-e", "sched:sched_wakeup_new",
"-e", "sched:sched_migrate_task",
};
+
+ /*
+ * The tracepoints trace_sched_stat_{wait, sleep, iowait}
+ * are not exposed to userspace if CONFIG_SCHEDSTATS is not set. To
+ * prevent "perf sched record" from failing, decide at run time
+ * whether to record the schedstat events.
+ */
+ const char * const schedstat_args[] = {
+ "-e", "sched:sched_stat_wait",
+ "-e", "sched:sched_stat_sleep",
+ "-e", "sched:sched_stat_iowait",
+ };
+ unsigned int schedstat_argc = schedstat_events_exposed() ?
+ ARRAY_SIZE(schedstat_args) : 0;
+
struct tep_event *waking_event;
/*
* +2 for either "-e", "sched:sched_wakeup" or
* "-e", "sched:sched_waking"
*/
- rec_argc = ARRAY_SIZE(record_args) + 2 + argc - 1;
+ rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
@@ -3376,6 +3398,9 @@ static int __cmd_record(int argc, const char **argv)
else
rec_argv[i++] = strdup("sched:sched_wakeup");
+ for (j = 0; j < schedstat_argc; j++)
+ rec_argv[i++] = strdup(schedstat_args[j]);
+
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 8c03a9862872..064da7f3618d 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -2601,6 +2601,12 @@ static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
}
}
+static void perf_script__exit(struct perf_script *script)
+{
+ perf_thread_map__put(script->threads);
+ perf_cpu_map__put(script->cpus);
+}
+
static int __cmd_script(struct perf_script *script)
{
int ret;
@@ -4143,8 +4149,10 @@ out_delete:
zfree(&script.ptime_range);
}
+ zstd_fini(&(session->zstd_data));
evlist__free_stats(session->evlist);
perf_session__delete(session);
+ perf_script__exit(&script);
if (script_started)
cleanup_scripting();
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d25cb8088e8c..634375937db9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2445,9 +2445,6 @@ int cmd_stat(int argc, const char **argv)
evlist__check_cpu_maps(evsel_list);
- if (perf_pmu__has_hybrid())
- stat_config.no_merge = true;
-
/*
* Initialize thread_map with comm names,
* so we could print it out on output.
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 7ec18ff57fc4..9c265fa96011 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2266,6 +2266,14 @@ static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sam
return augmented_args;
}
+static void syscall__exit(struct syscall *sc)
+{
+ if (!sc)
+ return;
+
+ free(sc->arg_fmt);
+}
+
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -3095,6 +3103,21 @@ static struct evsel *evsel__new_pgfault(u64 config)
return evsel;
}
+static void evlist__free_syscall_tp_fields(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct evsel_trace *et = evsel->priv;
+
+ if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
+ continue;
+
+ free(et->fmt);
+ free(et);
+ }
+}
+
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
const u32 type = event->header.type;
@@ -4130,7 +4153,7 @@ out_disable:
out_delete_evlist:
trace__symbols__exit(trace);
-
+ evlist__free_syscall_tp_fields(evlist);
evlist__delete(evlist);
cgroup__put(trace->cgroup);
trace->evlist = NULL;
@@ -4636,6 +4659,9 @@ do_concat:
err = parse_events_option(&o, lists[0], 0);
}
out:
+ free(strace_groups_dir);
+ free(lists[0]);
+ free(lists[1]);
if (sep)
*sep = ',';
@@ -4701,6 +4727,21 @@ out:
return err;
}
+static void trace__exit(struct trace *trace)
+{
+ int i;
+
+ strlist__delete(trace->ev_qualifier);
+ free(trace->ev_qualifier_ids.entries);
+ if (trace->syscalls.table) {
+ for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
+ syscall__exit(&trace->syscalls.table[i]);
+ free(trace->syscalls.table);
+ }
+ syscalltbl__delete(trace->sctbl);
+ zfree(&trace->perfconfig_events);
+}
+
int cmd_trace(int argc, const char **argv)
{
const char *trace_usage[] = {
@@ -5135,6 +5176,6 @@ out_close:
if (output_name != NULL)
fclose(trace.output);
out:
- zfree(&trace.perfconfig_events);
+ trace__exit(&trace);
return err;
}
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 33bda9c26542..dbf5f5215abe 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdio.h>
+#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -276,6 +277,7 @@ static int __test__bpf(int idx)
}
out:
+ free(obj_buf);
bpf__clear();
return ret;
}
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index 656218179222..44a50527f9d9 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -88,6 +88,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
struct evsel *evsel;
struct event_name tmp;
struct evlist *evlist = evlist__new_default();
+ char *unit = strdup("KRAVA");
TEST_ASSERT_VAL("failed to get evlist", evlist);
@@ -98,7 +99,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123);
- evsel->unit = strdup("KRAVA");
+ evsel->unit = unit;
TEST_ASSERT_VAL("failed to synthesize attr update unit",
!perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit));
@@ -118,6 +119,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
TEST_ASSERT_VAL("failed to synthesize attr update cpus",
!perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
- perf_cpu_map__put(evsel->core.own_cpus);
+ free(unit);
+ evlist__delete(evlist);
return 0;
}
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index 5ebf56331904..4e09f0a312af 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -5,6 +5,7 @@
#include "tests.h"
#include "debug.h"
#include "pmu.h"
+#include "pmu-hybrid.h"
#include <errno.h>
#include <linux/kernel.h>
@@ -102,7 +103,7 @@ int test__perf_evsel__roundtrip_name_test(struct test *test __maybe_unused, int
{
int err = 0, ret = 0;
- if (perf_pmu__has_hybrid())
+ if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom"))
return perf_evsel__name_array_test(evsel__hw_names, 2);
err = perf_evsel__name_array_test(evsel__hw_names, 1);
diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c
index edcbc70ff9d6..1ac72919fa35 100644
--- a/tools/perf/tests/maps.c
+++ b/tools/perf/tests/maps.c
@@ -116,5 +116,7 @@ int test__maps__merge_in(struct test *t __maybe_unused, int subtest __maybe_unus
ret = check_maps(merged3, ARRAY_SIZE(merged3), &maps);
TEST_ASSERT_VAL("merge check failed", !ret);
+
+ maps__exit(&maps);
return TEST_OK;
}
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 56a7b6a14195..8d4866739255 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -6,6 +6,7 @@
#include "tests.h"
#include "debug.h"
#include "pmu.h"
+#include "pmu-hybrid.h"
#include <dirent.h>
#include <errno.h>
#include <sys/types.h>
@@ -1596,6 +1597,13 @@ static int test__hybrid_raw1(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
+ if (!perf_pmu__hybrid_mounted("cpu_atom")) {
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+ return 0;
+ }
+
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
@@ -1620,13 +1628,9 @@ static int test__hybrid_cache_event(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x2 == (evsel->core.attr.config & 0xffffffff));
-
- evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", 0x10002 == (evsel->core.attr.config & 0xffffffff));
return 0;
}
@@ -2028,7 +2032,7 @@ static struct evlist_test test__hybrid_events[] = {
.id = 7,
},
{
- .name = "cpu_core/LLC-loads/,cpu_atom/LLC-load-misses/",
+ .name = "cpu_core/LLC-loads/",
.check = test__hybrid_cache_event,
.id = 8,
},
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 85d75b9b25a1..7c56bc1f4cff 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -21,6 +21,7 @@
#include "mmap.h"
#include "tests.h"
#include "pmu.h"
+#include "pmu-hybrid.h"
#define CHECK__(x) { \
while ((x) < 0) { \
@@ -93,7 +94,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
* For hybrid "cycles:u", it creates two events.
* Init the second evsel here.
*/
- if (perf_pmu__has_hybrid()) {
+ if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
evsel = evsel__next(evsel);
evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index ec4e3b21b831..b5efe675b321 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -61,6 +61,7 @@ static int session_write_header(char *path)
TEST_ASSERT_VAL("failed to write header",
!perf_session__write_header(session, session->evlist, data.file.fd, true));
+ evlist__delete(session->evlist);
perf_session__delete(session);
return 0;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 32ad92d3e454..22f8326547eb 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -2683,6 +2683,172 @@ static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
return metadata;
}
+/**
+ * Puts a fragment of an auxtrace buffer into the auxtrace queues based
+ * on the bounds of aux_event, if it matches with the buffer that's at
+ * file_offset.
+ *
+ * Normally, whole auxtrace buffers would be added to the queue. But we
+ * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
+ * is reset across each buffer, so splitting the buffers up in advance has
+ * the same effect.
+ */
+static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
+ struct perf_record_aux *aux_event, struct perf_sample *sample)
+{
+ int err;
+ char buf[PERF_SAMPLE_MAX_SIZE];
+ union perf_event *auxtrace_event_union;
+ struct perf_record_auxtrace *auxtrace_event;
+ union perf_event auxtrace_fragment;
+ __u64 aux_offset, aux_size;
+
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+
+ /*
+ * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
+ * from looping through the auxtrace index.
+ */
+ err = perf_session__peek_event(session, file_offset, buf,
+ PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
+ if (err)
+ return err;
+ auxtrace_event = &auxtrace_event_union->auxtrace;
+ if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
+ return -EINVAL;
+
+ if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
+ auxtrace_event->header.size != sz) {
+ return -EINVAL;
+ }
+
+ /*
+ * In per-thread mode, CPU is set to -1, but TID will be set instead. See
+ * auxtrace_mmap_params__set_idx(). Return 'not found' if neither CPU nor TID match.
+ */
+ if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) ||
+ auxtrace_event->cpu != sample->cpu)
+ return 1;
+
+ if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
+ /*
+ * Clamp size in snapshot mode. The buffer size is clamped in
+ * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect
+ * the buffer size.
+ */
+ aux_size = min(aux_event->aux_size, auxtrace_event->size);
+
+ /*
+ * In this mode the head also points to the end of the buffer, so subtract
+ * the size from aux_offset so that it points to the beginning, as in normal mode.
+ */
+ aux_offset = aux_event->aux_offset - aux_size;
+ } else {
+ aux_size = aux_event->aux_size;
+ aux_offset = aux_event->aux_offset;
+ }
+
+ if (aux_offset >= auxtrace_event->offset &&
+ aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
+ /*
+ * If this AUX event was inside this buffer somewhere, create a new auxtrace event
+ * based on the sizes of the aux event, and queue that fragment.
+ */
+ auxtrace_fragment.auxtrace = *auxtrace_event;
+ auxtrace_fragment.auxtrace.size = aux_size;
+ auxtrace_fragment.auxtrace.offset = aux_offset;
+ file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
+
+ pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
+ " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
+ return auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
+ file_offset, NULL);
+ }
+
+ /* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
+ return 1;
+}
+
+static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
+ u64 offset __maybe_unused, void *data __maybe_unused)
+{
+ struct perf_sample sample;
+ int ret;
+ struct auxtrace_index_entry *ent;
+ struct auxtrace_index *auxtrace_index;
+ struct evsel *evsel;
+ size_t i;
+
+ /* Don't care about any other events, we're only queuing buffers for AUX events */
+ if (event->header.type != PERF_RECORD_AUX)
+ return 0;
+
+ if (event->header.size < sizeof(struct perf_record_aux))
+ return -EINVAL;
+
+ /* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
+ if (!event->aux.aux_size)
+ return 0;
+
+ /*
+ * Parse the sample; we need the sample_id_all data that comes after the event so that the
+ * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
+ */
+ evsel = evlist__event2evsel(session->evlist, event);
+ if (!evsel)
+ return -EINVAL;
+ ret = evsel__parse_sample(evsel, event, &sample);
+ if (ret)
+ return ret;
+
+ /*
+ * Loop through the auxtrace index to find the buffer that matches up with this aux event.
+ */
+ list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
+ for (i = 0; i < auxtrace_index->nr; i++) {
+ ent = &auxtrace_index->entries[i];
+ ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
+ ent->sz, &event->aux, &sample);
+ /*
+ * Stop search on error or successful values. Continue search on
+ * 1 ('not found')
+ */
+ if (ret != 1)
+ return ret;
+ }
+ }
+
+ /*
+ * Couldn't find the buffer corresponding to this aux record; something went wrong. Warn,
+ * don't exit with an error because it will still be possible to decode other aux records.
+ */
+ pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
+ " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
+ return 0;
+}
+
+static int cs_etm__queue_aux_records(struct perf_session *session)
+{
+ struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
+ struct auxtrace_index, list);
+ if (index && index->nr > 0)
+ return perf_session__peek_events(session, session->header.data_offset,
+ session->header.data_size,
+ cs_etm__queue_aux_records_cb, NULL);
+
+ /*
+ * We would get here if there are no entries in the index (either no auxtrace
+ * buffers or no index at all). Fail silently as there is the possibility of
+ * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
+ * false.
+ *
+ * In that scenario, buffers will not be split by AUX records.
+ */
+ return 0;
+}
+
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
@@ -2883,7 +3049,7 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
if (err)
goto err_delete_thread;
- err = auxtrace_queues__process_index(&etm->queues, session);
+ err = cs_etm__queue_aux_records(session);
if (err)
goto err_delete_thread;
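A worked example of the snapshot-mode rewind in cs_etm__queue_aux_fragment(), with invented numbers: an AUX record reporting aux_offset 0x3000 (the head, i.e. the end of the data) and aux_size 0x1800 against a 0x1000-byte AUXTRACE buffer gets aux_size clamped to min(0x1800, 0x1000) = 0x1000, and the fragment start becomes 0x3000 - 0x1000 = 0x2000.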
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index a9c102e8e3c0..f5d260b1df4d 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -20,7 +20,7 @@
static void close_dir(struct perf_data_file *files, int nr)
{
- while (--nr >= 1) {
+ while (--nr >= 0) {
close(files[nr].fd);
zfree(&files[nr].path);
}
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index d786cf6b0cfa..ee15db2be2f4 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1154,8 +1154,10 @@ struct map *dso__new_map(const char *name)
struct map *map = NULL;
struct dso *dso = dso__new(name);
- if (dso)
+ if (dso) {
map = map__new2(0, dso);
+ dso__put(dso);
+ }
return map;
}
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 7d2ba8419b0c..609ca1671501 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -113,14 +113,14 @@ static Dwarf_Line *cu_getsrc_die(Dwarf_Die *cu_die, Dwarf_Addr addr)
*
* Find a line number and file name for @addr in @cu_die.
*/
-int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
- const char **fname, int *lineno)
+int cu_find_lineinfo(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ const char **fname, int *lineno)
{
Dwarf_Line *line;
Dwarf_Die die_mem;
Dwarf_Addr faddr;
- if (die_find_realfunc(cu_die, (Dwarf_Addr)addr, &die_mem)
+ if (die_find_realfunc(cu_die, addr, &die_mem)
&& die_entrypc(&die_mem, &faddr) == 0 &&
faddr == addr) {
*fname = dwarf_decl_file(&die_mem);
@@ -128,7 +128,7 @@ int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
goto out;
}
- line = cu_getsrc_die(cu_die, (Dwarf_Addr)addr);
+ line = cu_getsrc_die(cu_die, addr);
if (line && dwarf_lineno(line, lineno) == 0) {
*fname = dwarf_linesrc(line, NULL, NULL);
if (!*fname)
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index cb99646843a9..7ee0fa19b5c4 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -19,7 +19,7 @@ const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname);
const char *cu_get_comp_dir(Dwarf_Die *cu_die);
/* Get a line number and file name for given address */
-int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
+int cu_find_lineinfo(Dwarf_Die *cudie, Dwarf_Addr addr,
const char **fname, int *lineno);
/* Walk on functions at given address */
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index ebc5e9ad35db..cec2e6cad8aa 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -186,10 +186,12 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->cpuid);
zfree(&env->cmdline);
zfree(&env->cmdline_argv);
+ zfree(&env->sibling_dies);
zfree(&env->sibling_cores);
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
+ zfree(&env->cpu_pmu_caps);
zfree(&env->numa_map);
for (i = 0; i < env->nr_numa_nodes; i++)
diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c
index 39062df02629..51424cdc3b68 100644
--- a/tools/perf/util/lzma.c
+++ b/tools/perf/util/lzma.c
@@ -69,7 +69,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
if (ferror(infile)) {
pr_err("lzma: read error: %s\n", strerror(errno));
- goto err_fclose;
+ goto err_lzma_end;
}
if (feof(infile))
@@ -83,7 +83,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
if (writen(output_fd, buf_out, write_size) != write_size) {
pr_err("lzma: write error: %s\n", strerror(errno));
- goto err_fclose;
+ goto err_lzma_end;
}
strm.next_out = buf_out;
@@ -95,11 +95,13 @@ int lzma_decompress_to_file(const char *input, int output_fd)
break;
pr_err("lzma: failed %s\n", lzma_strerror(ret));
- goto err_fclose;
+ goto err_lzma_end;
}
}
err = 0;
+err_lzma_end:
+ lzma_end(&strm);
err_fclose:
fclose(infile);
return err;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 8af693d9678c..72e7f3616157 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -192,6 +192,8 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
if (!(prot & PROT_EXEC))
dso__set_loaded(dso);
}
+
+ nsinfo__put(dso->nsinfo);
dso->nsinfo = nsi;
if (build_id__is_defined(bid))
diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
index dd9ed56e0504..756295dedccc 100644
--- a/tools/perf/util/pfm.c
+++ b/tools/perf/util/pfm.c
@@ -99,7 +99,7 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
grp_leader = evsel;
if (grp_evt > -1) {
- evsel->leader = grp_leader;
+ evsel__set_leader(evsel, grp_leader);
grp_leader->core.nr_members++;
grp_evt++;
}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 44b90d638ad5..a1bd7007a8b4 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -950,6 +950,13 @@ static struct perf_pmu *pmu_lookup(const char *name)
LIST_HEAD(format);
LIST_HEAD(aliases);
__u32 type;
+ bool is_hybrid = perf_pmu__hybrid_mounted(name);
+
+ /*
+ * Check pmu name for hybrid and the pmu may be invalid in sysfs
+ */
+ if (!strncmp(name, "cpu_", 4) && !is_hybrid)
+ return NULL;
/*
* The pmu data we store & need consists of the pmu
@@ -978,7 +985,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->is_uncore = pmu_is_uncore(name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
- pmu->is_hybrid = perf_pmu__hybrid_mounted(name);
+ pmu->is_hybrid = is_hybrid;
pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
pmu_add_sys_aliases(&aliases, pmu);
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index c14e1d228e56..b2a02c9ab8ea 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -179,8 +179,10 @@ struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user)
struct map *map;
map = dso__new_map(target);
- if (map && map->dso)
+ if (map && map->dso) {
+ nsinfo__put(map->dso->nsinfo);
map->dso->nsinfo = nsinfo__get(nsi);
+ }
return map;
} else {
return kernel_get_module_map(target);
@@ -237,8 +239,8 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
clear_probe_trace_event(tevs + i);
}
-static bool kprobe_blacklist__listed(unsigned long address);
-static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
+static bool kprobe_blacklist__listed(u64 address);
+static bool kprobe_warn_out_range(const char *symbol, u64 address)
{
struct map *map;
bool ret = false;
@@ -398,8 +400,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
pr_debug("Symbol %s address found : %" PRIx64 "\n",
pp->function, address);
- ret = debuginfo__find_probe_point(dinfo, (unsigned long)address,
- result);
+ ret = debuginfo__find_probe_point(dinfo, address, result);
if (ret <= 0)
ret = (!ret) ? -ENOENT : ret;
else {
@@ -587,7 +588,7 @@ static void debuginfo_cache__exit(void)
}
-static int get_text_start_address(const char *exec, unsigned long *address,
+static int get_text_start_address(const char *exec, u64 *address,
struct nsinfo *nsi)
{
Elf *elf;
@@ -632,7 +633,7 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
bool is_kprobe)
{
struct debuginfo *dinfo = NULL;
- unsigned long stext = 0;
+ u64 stext = 0;
u64 addr = tp->address;
int ret = -ENOENT;
@@ -660,8 +661,7 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
dinfo = debuginfo_cache__open(tp->module, verbose <= 0);
if (dinfo)
- ret = debuginfo__find_probe_point(dinfo,
- (unsigned long)addr, pp);
+ ret = debuginfo__find_probe_point(dinfo, addr, pp);
else
ret = -ENOENT;
@@ -676,7 +676,7 @@ error:
/* Adjust symbol name and address */
static int post_process_probe_trace_point(struct probe_trace_point *tp,
- struct map *map, unsigned long offs)
+ struct map *map, u64 offs)
{
struct symbol *sym;
u64 addr = tp->address - offs;
@@ -719,7 +719,7 @@ post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *pathname)
{
struct map *map;
- unsigned long stext = 0;
+ u64 stext = 0;
int i, ret = 0;
/* Prepare a map for offline binary */
@@ -745,7 +745,7 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
struct nsinfo *nsi)
{
int i, ret = 0;
- unsigned long stext = 0;
+ u64 stext = 0;
if (!exec)
return 0;
@@ -790,7 +790,7 @@ post_process_module_probe_trace_events(struct probe_trace_event *tevs,
mod_name = find_module_name(module);
for (i = 0; i < ntevs; i++) {
ret = post_process_probe_trace_point(&tevs[i].point,
- map, (unsigned long)text_offs);
+ map, text_offs);
if (ret < 0)
break;
tevs[i].point.module =
@@ -1534,7 +1534,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
* so tmp[1] should always be valid (but could be '\0').
*/
if (tmp && !strncmp(tmp, "0x", 2)) {
- pp->abs_address = strtoul(pp->function, &tmp, 0);
+ pp->abs_address = strtoull(pp->function, &tmp, 0);
if (*tmp != '\0') {
semantic_error("Invalid absolute address.\n");
return -EINVAL;
@@ -1909,7 +1909,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
argv[i] = NULL;
argc -= 1;
} else
- tp->address = strtoul(fmt1_str, NULL, 0);
+ tp->address = strtoull(fmt1_str, NULL, 0);
} else {
/* Only the symbol-based probe has offset */
tp->symbol = strdup(fmt1_str);
@@ -2155,7 +2155,7 @@ synthesize_uprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
return -EINVAL;
/* Use the tp->address for uprobes */
- err = strbuf_addf(buf, "%s:0x%lx", tp->module, tp->address);
+ err = strbuf_addf(buf, "%s:0x%" PRIx64, tp->module, tp->address);
if (err >= 0 && tp->ref_ctr_offset) {
if (!uprobe_ref_ctr_is_supported())
@@ -2170,7 +2170,7 @@ synthesize_kprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
{
if (!strncmp(tp->symbol, "0x", 2)) {
/* Absolute address. See try_to_find_absolute_address() */
- return strbuf_addf(buf, "%s%s0x%lx", tp->module ?: "",
+ return strbuf_addf(buf, "%s%s0x%" PRIx64, tp->module ?: "",
tp->module ? ":" : "", tp->address);
} else {
return strbuf_addf(buf, "%s%s%s+%lu", tp->module ?: "",
@@ -2269,7 +2269,7 @@ static int convert_to_perf_probe_point(struct probe_trace_point *tp,
pp->function = strdup(tp->symbol);
pp->offset = tp->offset;
} else {
- ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address);
+ ret = e_snprintf(buf, 128, "0x%" PRIx64, tp->address);
if (ret < 0)
return ret;
pp->function = strdup(buf);
@@ -2450,8 +2450,8 @@ void clear_probe_trace_event(struct probe_trace_event *tev)
struct kprobe_blacklist_node {
struct list_head list;
- unsigned long start;
- unsigned long end;
+ u64 start;
+ u64 end;
char *symbol;
};
@@ -2496,7 +2496,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist)
}
INIT_LIST_HEAD(&node->list);
list_add_tail(&node->list, blacklist);
- if (sscanf(buf, "0x%lx-0x%lx", &node->start, &node->end) != 2) {
+ if (sscanf(buf, "0x%" PRIx64 "-0x%" PRIx64, &node->start, &node->end) != 2) {
ret = -EINVAL;
break;
}
@@ -2512,7 +2512,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist)
ret = -ENOMEM;
break;
}
- pr_debug2("Blacklist: 0x%lx-0x%lx, %s\n",
+ pr_debug2("Blacklist: 0x%" PRIx64 "-0x%" PRIx64 ", %s\n",
node->start, node->end, node->symbol);
ret++;
}
@@ -2524,8 +2524,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist)
}
static struct kprobe_blacklist_node *
-kprobe_blacklist__find_by_address(struct list_head *blacklist,
- unsigned long address)
+kprobe_blacklist__find_by_address(struct list_head *blacklist, u64 address)
{
struct kprobe_blacklist_node *node;
@@ -2553,7 +2552,7 @@ static void kprobe_blacklist__release(void)
kprobe_blacklist__delete(&kprobe_blacklist);
}
-static bool kprobe_blacklist__listed(unsigned long address)
+static bool kprobe_blacklist__listed(u64 address)
{
return !!kprobe_blacklist__find_by_address(&kprobe_blacklist, address);
}
@@ -3221,7 +3220,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
* In __add_probe_trace_events, a NULL symbol is interpreted as
* invalid.
*/
- if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0)
+ if (asprintf(&tp->symbol, "0x%" PRIx64, tp->address) < 0)
goto errout;
/* For kprobe, check range */
@@ -3232,7 +3231,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
goto errout;
}
- if (asprintf(&tp->realname, "abs_%lx", tp->address) < 0)
+ if (asprintf(&tp->realname, "abs_%" PRIx64, tp->address) < 0)
goto errout;
if (pev->target) {
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 65769d7949a3..8ad5b1579f1d 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -33,7 +33,7 @@ struct probe_trace_point {
char *module; /* Module name */
unsigned long offset; /* Offset from symbol */
unsigned long ref_ctr_offset; /* SDT reference counter offset */
- unsigned long address; /* Actual address of the trace point */
+ u64 address; /* Actual address of the trace point */
bool retprobe; /* Return probe flag */
};
@@ -70,7 +70,7 @@ struct perf_probe_point {
bool retprobe; /* Return probe flag */
char *lazy_line; /* Lazy matching pattern */
unsigned long offset; /* Offset from function entry */
- unsigned long abs_address; /* Absolute address of the point */
+ u64 abs_address; /* Absolute address of the point */
};
/* Perf probe probing argument field chain */
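
The unsigned long to u64 sweep across probe-event.c/.h exists because perf can be built as a 32-bit binary yet probe a 64-bit kernel: on ILP32 targets, unsigned long is 32 bits, so strtoul() and %lx silently truncate kernel addresses. A small self-contained example of the portable parse-and-print pattern the series converges on (uint64_t being the userland spelling of the kernel's u64):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* strtoull + PRIx64 stay 64-bit even where long is 32-bit */
        uint64_t addr = strtoull("0xffffffffc0000000", NULL, 0);

        printf("probe at 0x%" PRIx64 "\n", addr);
        return 0;
}
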
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index f9a6cbcd6415..3d50de3217d5 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -377,11 +377,11 @@ int probe_file__del_events(int fd, struct strfilter *filter)
ret = probe_file__get_events(fd, filter, namelist);
if (ret < 0)
- return ret;
+ goto out;
ret = probe_file__del_strlist(fd, namelist);
+out:
strlist__delete(namelist);
-
return ret;
}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 02ef0d78053b..50d861a80f57 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -668,7 +668,7 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
}
tp->offset = (unsigned long)(paddr - eaddr);
- tp->address = (unsigned long)paddr;
+ tp->address = paddr;
tp->symbol = strdup(symbol);
if (!tp->symbol)
return -ENOMEM;
@@ -1707,7 +1707,7 @@ int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
}
/* Reverse search */
-int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
+int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
struct perf_probe_point *ppt)
{
Dwarf_Die cudie, spdie, indie;
@@ -1720,14 +1720,14 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
addr += baseaddr;
/* Find cu die */
if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
- pr_warning("Failed to find debug information for address %lx\n",
+ pr_warning("Failed to find debug information for address %" PRIx64 "\n",
addr);
ret = -EINVAL;
goto end;
}
/* Find a corresponding line (filename and lineno) */
- cu_find_lineinfo(&cudie, addr, &fname, &lineno);
+ cu_find_lineinfo(&cudie, (Dwarf_Addr)addr, &fname, &lineno);
/* Don't care whether it failed or not */
/* Find a corresponding function (name, baseline and baseaddr) */
@@ -1742,7 +1742,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
}
fname = dwarf_decl_file(&spdie);
- if (addr == (unsigned long)baseaddr) {
+ if (addr == baseaddr) {
/* Function entry - Relative line number is 0 */
lineno = baseline;
goto post;
@@ -1788,7 +1788,7 @@ post:
if (lineno)
ppt->line = lineno - baseline;
else if (basefunc) {
- ppt->offset = addr - (unsigned long)baseaddr;
+ ppt->offset = addr - baseaddr;
func = basefunc;
}
@@ -1828,8 +1828,7 @@ static int line_range_add_line(const char *src, unsigned int lineno,
}
static int line_range_walk_cb(const char *fname, int lineno,
- Dwarf_Addr addr __maybe_unused,
- void *data)
+ Dwarf_Addr addr, void *data)
{
struct line_finder *lf = data;
const char *__fname;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 2febb5875678..8bc1c80d3c1c 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -46,7 +46,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
struct probe_trace_event **tevs);
/* Find a perf_probe_point from debuginfo */
-int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
+int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
struct perf_probe_point *ppt);
int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index e9c929a39973..51f727402912 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -306,6 +306,7 @@ void perf_session__delete(struct perf_session *session)
evlist__delete(session->evlist);
perf_data__close(session->data);
}
+ trace_event__cleanup(&session->tevent);
free(session);
}
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 88ce47f2547e..568a88c001c6 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -3370,7 +3370,7 @@ static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int
add_key(sb, s[i].name, llen);
}
-const char *sort_help(const char *prefix)
+char *sort_help(const char *prefix)
{
struct strbuf sb;
char *s;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 87a092645aa7..b67c469aba79 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -302,7 +302,7 @@ void reset_output_field(void);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);
-const char *sort_help(const char *prefix);
+char *sort_help(const char *prefix);
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
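
Switching sort_help() from const char * to char * is an ownership fix rather than a cosmetic one: the function returns a heap string detached from a strbuf, and the const return type hid the fact that callers must free it. A sketch of the resulting caller contract (show_usage() is illustrative, not perf code):

#include <stdio.h>
#include <stdlib.h>

char *sort_help(const char *prefix);    /* returns a malloc'd string */

static void show_usage(void)
{
        char *help = sort_help("sort by key(s): ");

        if (help) {
                puts(help);
                free(help);             /* the caller owns the allocation */
        }
}
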
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 83a2bc02df15..588601000f3f 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -596,6 +596,18 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c
}
}
+static bool is_uncore(struct evsel *evsel)
+{
+ struct perf_pmu *pmu = evsel__find_pmu(evsel);
+
+ return pmu && pmu->is_uncore;
+}
+
+static bool hybrid_uniquify(struct evsel *evsel)
+{
+ return perf_pmu__has_hybrid() && !is_uncore(evsel);
+}
+
static bool collect_data(struct perf_stat_config *config, struct evsel *counter,
void (*cb)(struct perf_stat_config *config, struct evsel *counter, void *data,
bool first),
@@ -604,7 +616,7 @@ static bool collect_data(struct perf_stat_config *config, struct evsel *counter,
if (counter->merged_stat)
return false;
cb(config, counter, data, true);
- if (config->no_merge)
+ if (config->no_merge || hybrid_uniquify(counter))
uniquify_event_name(counter);
else if (counter->auto_merge_stats)
collect_all_aliases(config, counter, cb, data);
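
On a hybrid system, merging counters by event name would fold the cpu_core and cpu_atom instances of the same event into a single row, so the new hybrid_uniquify() check forces per-PMU naming for core events while uncore PMUs keep the old merge behaviour. The decision, distilled into a sketch (not the perf source):

#include <stdbool.h>

/* Keep counters separate when merging would mix hybrid core PMUs. */
static bool should_uniquify(bool no_merge, bool has_hybrid, bool is_uncore)
{
        return no_merge || (has_hybrid && !is_uncore);
}
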
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index be8d8d4a4e08..6276ce0c0196 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -12,6 +12,8 @@ import sys
import os
import time
+assert sys.version_info >= (3, 7), "Python version is too old"
+
from collections import namedtuple
from enum import Enum, auto
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 90bc007f1f93..2c6f916ccbaf 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -6,15 +6,13 @@
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
-from __future__ import annotations
import importlib.util
import logging
import subprocess
import os
import shutil
import signal
-from typing import Iterator
-from typing import Optional
+from typing import Iterator, Optional, Tuple
from contextlib import ExitStack
@@ -208,7 +206,7 @@ def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceT
raise ConfigError(arch + ' is not a valid arch')
def get_source_tree_ops_from_qemu_config(config_path: str,
- cross_compile: Optional[str]) -> tuple[
+ cross_compile: Optional[str]) -> Tuple[
str, LinuxSourceTreeOperations]:
# The module name/path has very little to do with where the actual file
# exists (I learned this through experimentation and could not find it
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index c3c524b79db8..b88db3f51dc5 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -338,9 +338,11 @@ def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
def parse_test_result(lines: LineStream) -> TestResult:
consume_non_diagnostic(lines)
if not lines or not parse_tap_header(lines):
- return TestResult(TestStatus.NO_TESTS, [], lines)
+ return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
expected_test_suite_num = parse_test_plan(lines)
- if not expected_test_suite_num:
+ if expected_test_suite_num == 0:
+ return TestResult(TestStatus.NO_TESTS, [], lines)
+ elif expected_test_suite_num is None:
return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
test_suites = []
for i in range(1, expected_test_suite_num + 1):
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index bdae0e5f6197..75045aa0f8a1 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -157,8 +157,18 @@ class KUnitParserTest(unittest.TestCase):
kunit_parser.TestStatus.FAILURE,
result.status)
+ def test_no_header(self):
+ empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log')
+ with open(empty_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(file.readlines()))
+ self.assertEqual(0, len(result.suites))
+ self.assertEqual(
+ kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
+ result.status)
+
def test_no_tests(self):
- empty_log = test_data_path('test_is_test_passed-no_tests_run.log')
+ empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
@@ -173,7 +183,7 @@ class KUnitParserTest(unittest.TestCase):
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- print_mock.assert_any_call(StrContains('no tests run!'))
+ print_mock.assert_any_call(StrContains('could not parse test results!'))
print_mock.stop()
file.close()
@@ -309,7 +319,7 @@ class KUnitJsonTest(unittest.TestCase):
result["sub_groups"][1]["test_cases"][0])
def test_no_tests_json(self):
- result = self._json_for('test_is_test_passed-no_tests_run.log')
+ result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
self.assertEqual(0, len(result['sub_groups']))
class StrContains(str):
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log
index ba69f5c94b75..ba69f5c94b75 100644
--- a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log
new file mode 100644
index 000000000000..5f48ee659d40
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log
@@ -0,0 +1,2 @@
+TAP version 14
+1..0
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index ee27d68d2a1c..b5940e6ca67c 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -715,6 +715,8 @@ out:
bpf_object__close(obj);
}
+#include "tailcall_bpf2bpf4.skel.h"
+
/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly preserved
* across tailcalls combined with bpf2bpf calls. To make sure that the tailcall
* counter behaves correctly, the bpf program goes through the following flow:
@@ -727,10 +729,15 @@ out:
* the loop begins. At the end of the test, make sure that the global counter is
* equal to 31, because the tailcall counter includes the first two tailcalls
* whereas the global counter is incremented only by the loop shown in the flow above.
+ *
+ * The noise parameter is used to insert bpf_map_update calls into the logic
+ * to force the verifier to patch instructions. This lets us ensure that the
+ * jump logic remains correct as instructions are moved around.
*/
-static void test_tailcall_bpf2bpf_4(void)
+static void test_tailcall_bpf2bpf_4(bool noise)
{
- int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ int err, map_fd, prog_fd, main_fd, data_fd, i;
+ struct tailcall_bpf2bpf4__bss val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
@@ -774,11 +781,6 @@ static void test_tailcall_bpf2bpf_4(void)
goto out;
}
- err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
-
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
return;
@@ -788,9 +790,21 @@ static void test_tailcall_bpf2bpf_4(void)
return;
i = 0;
+ val.noise = noise;
+ val.count = 0;
+ err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
- CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
- err, errno, val);
+ CHECK(err || val.count != 31, "tailcall count", "err %d errno %d count %d\n",
+ err, errno, val.count);
out:
bpf_object__close(obj);
@@ -815,5 +829,7 @@ void test_tailcalls(void)
if (test__start_subtest("tailcall_bpf2bpf_3"))
test_tailcall_bpf2bpf_3();
if (test__start_subtest("tailcall_bpf2bpf_4"))
- test_tailcall_bpf2bpf_4();
+ test_tailcall_bpf2bpf_4(false);
+ if (test__start_subtest("tailcall_bpf2bpf_5"))
+ test_tailcall_bpf2bpf_4(true);
}
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
index 77df6d4db895..e89368a50b97 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -3,6 +3,13 @@
#include <bpf/bpf_helpers.h>
struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} nop_table SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
@@ -10,10 +17,21 @@ struct {
} jmp_table SEC(".maps");
int count = 0;
+int noise = 0;
+
+__always_inline int subprog_noise(void)
+{
+ __u32 key = 0;
+
+ bpf_map_lookup_elem(&nop_table, &key);
+ return 0;
+}
__noinline
int subprog_tail_2(struct __sk_buff *skb)
{
+ if (noise)
+ subprog_noise();
bpf_tail_call_static(skb, &jmp_table, 2);
return skb->len * 3;
}
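
The point of the noise path: the verifier rewrites helper calls such as array-map lookups (they can be inlined), which shifts the offsets of the instructions that follow, and the test must show that the JITed tail-call jumps still land correctly after that movement. A compressed sketch of the BPF-side pattern, assuming the usual libbpf helpers (names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} scratch SEC(".maps");

int noise = 0;  /* toggled from user space through the .bss map */

static __always_inline void make_noise(void)
{
        __u32 key = 0;

        /* Array-map lookups may be inlined by the verifier, so
         * enabling this shifts the instructions that follow it. */
        bpf_map_lookup_elem(&scratch, &key);
}

char LICENSE[] SEC("license") = "GPL";
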
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 615ab254899d..010b59b13917 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -45,6 +45,7 @@ enum vm_guest_mode {
VM_MODE_P40V48_64K,
VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
VM_MODE_P47V64_4K,
+ VM_MODE_P44V64_4K,
NUM_VM_MODES,
};
@@ -62,7 +63,7 @@ enum vm_guest_mode {
#elif defined(__s390x__)
-#define VM_MODE_DEFAULT VM_MODE_P47V64_4K
+#define VM_MODE_DEFAULT VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 16)
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 9f49f6caafe5..632b74d6b3ca 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -401,7 +401,7 @@ unexpected_exception:
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
- vm->page_size, 0, 0);
+ vm->page_size);
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
index 25bff307c71f..c330f414ef96 100644
--- a/tools/testing/selftests/kvm/lib/guest_modes.c
+++ b/tools/testing/selftests/kvm/lib/guest_modes.c
@@ -22,6 +22,22 @@ void guest_modes_append_default(void)
}
}
#endif
+#ifdef __s390x__
+ {
+ int kvm_fd, vm_fd;
+ struct kvm_s390_vm_cpu_processor info;
+
+ kvm_fd = open_kvm_dev_path_or_exit();
+ vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
+ kvm_device_access(vm_fd, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR, &info, false);
+ close(vm_fd);
+ close(kvm_fd);
+ /* Starting with z13 we have 47 bits of physical address */
+ if (info.ibc >= 0x30)
+ guest_mode_append(VM_MODE_P47V64_4K, true, true);
+ }
+#endif
}
void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg)
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 5b56b57b3c20..10a8ed691c66 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -176,6 +176,7 @@ const char *vm_guest_mode_string(uint32_t i)
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
[VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
[VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
+ [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
};
_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
@@ -194,6 +195,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
{ 40, 48, 0x10000, 16 },
{ 0, 0, 0x1000, 12 },
{ 47, 64, 0x1000, 12 },
+ { 44, 64, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
@@ -282,6 +284,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
case VM_MODE_P47V64_4K:
vm->pgtable_levels = 5;
break;
+ case VM_MODE_P44V64_4K:
+ vm->pgtable_levels = 5;
+ break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
}
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 85b18bb8f762..72a1c9b4882c 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -377,7 +377,8 @@ static void test_add_max_memory_regions(void)
(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
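
test_add_max_memory_regions() maps one slot-sized region per memory slot, and with thousands of slots the anonymous mapping can exceed what a strict overcommit policy will grant. MAP_NORESERVE skips swap-space reservation, so the test only needs real backing for pages it actually touches. The flag in isolation:

#include <stddef.h>
#include <sys/mman.h>

void *reserve_region(size_t len)
{
        /* Reserve address space without committing swap for it. */
        return mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}
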
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 42bd658f52a8..af27c7e829c1 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -615,7 +615,7 @@ int main(void)
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+ vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
pr_info("Testing access to Hyper-V specific MSRs\n");
guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
diff --git a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
index 523371cf8e8f..da2325fcad87 100644
--- a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
+++ b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
@@ -71,7 +71,7 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
/* Set up a #PF handler to eat the RSVD #PF and signal all done! */
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_handle_exception(vm, PF_VECTOR, guest_pf_handler);
+ vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler);
r = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index c1f831803ad2..d0fe2fdce58c 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -53,15 +53,28 @@ static inline void sync_with_host(uint64_t phase)
: "+a" (phase));
}
-void self_smi(void)
+static void self_smi(void)
{
x2apic_write_reg(APIC_ICR,
APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}
-void guest_code(void *arg)
+static void l2_guest_code(void)
{
+ sync_with_host(8);
+
+ sync_with_host(10);
+
+ vmcall();
+}
+
+static void guest_code(void *arg)
+{
+ #define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+ struct svm_test_data *svm = arg;
+ struct vmx_pages *vmx_pages = arg;
sync_with_host(1);
@@ -74,21 +87,50 @@ void guest_code(void *arg)
sync_with_host(4);
if (arg) {
- if (cpu_has_svm())
- generic_svm_setup(arg, NULL, NULL);
- else
- GUEST_ASSERT(prepare_for_vmx_operation(arg));
+ if (cpu_has_svm()) {
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ } else {
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ }
sync_with_host(5);
self_smi();
sync_with_host(7);
+
+ if (cpu_has_svm()) {
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ svm->vmcb->save.rip += 3;
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ } else {
+ vmlaunch();
+ vmresume();
+ }
+
+ /* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
+ sync_with_host(12);
}
sync_with_host(DONE);
}
+void inject_smi(struct kvm_vm *vm)
+{
+ struct kvm_vcpu_events events;
+
+ vcpu_events_get(vm, VCPU_ID, &events);
+
+ events.smi.pending = 1;
+ events.flags |= KVM_VCPUEVENT_VALID_SMM;
+
+ vcpu_events_set(vm, VCPU_ID, &events);
+}
+
int main(int argc, char *argv[])
{
vm_vaddr_t nested_gva = 0;
@@ -147,6 +189,22 @@ int main(int argc, char *argv[])
"Unexpected stage: #%x, got %x",
stage, stage_reported);
+ /*
+ * Enter SMM during L2 execution and check that we correctly
+ * return from it. Do not perform save/restore while in SMM yet.
+ */
+ if (stage == 8) {
+ inject_smi(vm);
+ continue;
+ }
+
+ /*
+ * Perform save/restore while the guest is in SMM triggered
+ * during L2 execution.
+ */
+ if (stage == 10)
+ inject_smi(vm);
+
state = vcpu_save_state(vm, VCPU_ID);
kvm_vm_release(vm);
kvm_vm_restart(vm, O_RDWR);
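
inject_smi() asserts a pending SMI from the host side instead of relying on the guest's self-IPI, which is what allows SMM entry to be triggered while the vCPU is running nested L2 code. Stripped of the selftest wrappers, this is the KVM_GET/SET_VCPU_EVENTS ioctl pair with the SMM-valid flag; a raw sketch against a vCPU fd:

#include <linux/kvm.h>
#include <sys/ioctl.h>

int inject_smi_raw(int vcpu_fd)
{
        struct kvm_vcpu_events events;

        if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
                return -1;
        events.smi.pending = 1;
        events.flags |= KVM_VCPUEVENT_VALID_SMM;  /* smi fields are valid */
        return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
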
diff --git a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
index b37585e6aa38..46a97f318f58 100755
--- a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
+++ b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
@@ -282,7 +282,9 @@ done
#
echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
for memory in `hotpluggable_online_memory`; do
- offline_memory_expect_fail $memory
+ if [ $((RANDOM % 100)) -lt $ratio ]; then
+ offline_memory_expect_fail $memory
+ fi
done
echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh
index c19ecc6a8614..ecbf57f264ed 100755
--- a/tools/testing/selftests/net/icmp_redirect.sh
+++ b/tools/testing/selftests/net/icmp_redirect.sh
@@ -313,9 +313,10 @@ check_exception()
fi
log_test $? 0 "IPv4: ${desc}"
- if [ "$with_redirect" = "yes" ]; then
+ # No PMTU info for the "redirect" and "redirect exception plus mtu" tests
+ if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then
ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \
- grep -q "${H2_N2_IP6} from :: via ${R2_LLADDR} dev br0.*${mtu}"
+ grep -v "mtu" | grep -q "${H2_N2_IP6} .*via ${R2_LLADDR} dev br0"
elif [ -n "${mtu}" ]; then
ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \
grep -q "${mtu}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 9a191c1a5de8..f02f4de2f3a0 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -1409,7 +1409,7 @@ syncookies_tests()
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
- chk_join_nr "subflows limited by server w cookies" 2 2 1
+ chk_join_nr "subflows limited by server w cookies" 2 1 1
# test signal address with cookies
reset_with_cookies
diff --git a/tools/testing/selftests/net/timestamping.c b/tools/testing/selftests/net/timestamping.c
index 21091be70688..aee631c5284e 100644
--- a/tools/testing/selftests/net/timestamping.c
+++ b/tools/testing/selftests/net/timestamping.c
@@ -47,7 +47,7 @@ static void usage(const char *error)
{
if (error)
printf("invalid option: %s\n", error);
- printf("timestamping interface option*\n\n"
+ printf("timestamping <interface> [bind_phc_index] [option]*\n\n"
"Options:\n"
" IP_MULTICAST_LOOP - looping outgoing multicasts\n"
" SO_TIMESTAMP - normal software time stamping, ms resolution\n"
@@ -58,6 +58,7 @@ static void usage(const char *error)
" SOF_TIMESTAMPING_RX_SOFTWARE - software fallback for incoming packets\n"
" SOF_TIMESTAMPING_SOFTWARE - request reporting of software time stamps\n"
" SOF_TIMESTAMPING_RAW_HARDWARE - request reporting of raw HW time stamps\n"
+ " SOF_TIMESTAMPING_BIND_PHC - request to bind a PHC of PTP vclock\n"
" SIOCGSTAMP - check last socket time stamp\n"
" SIOCGSTAMPNS - more accurate socket time stamp\n"
" PTPV2 - use PTPv2 messages\n");
@@ -311,7 +312,6 @@ static void recvpacket(int sock, int recvmsg_flags,
int main(int argc, char **argv)
{
- int so_timestamping_flags = 0;
int so_timestamp = 0;
int so_timestampns = 0;
int siocgstamp = 0;
@@ -325,6 +325,8 @@ int main(int argc, char **argv)
struct ifreq device;
struct ifreq hwtstamp;
struct hwtstamp_config hwconfig, hwconfig_requested;
+ struct so_timestamping so_timestamping_get = { 0, -1 };
+ struct so_timestamping so_timestamping = { 0, -1 };
struct sockaddr_in addr;
struct ip_mreq imr;
struct in_addr iaddr;
@@ -342,7 +344,12 @@ int main(int argc, char **argv)
exit(1);
}
- for (i = 2; i < argc; i++) {
+ if (argc >= 3 && sscanf(argv[2], "%d", &so_timestamping.bind_phc) == 1)
+ val = 3;
+ else
+ val = 2;
+
+ for (i = val; i < argc; i++) {
if (!strcasecmp(argv[i], "SO_TIMESTAMP"))
so_timestamp = 1;
else if (!strcasecmp(argv[i], "SO_TIMESTAMPNS"))
@@ -356,17 +363,19 @@ int main(int argc, char **argv)
else if (!strcasecmp(argv[i], "PTPV2"))
ptpv2 = 1;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_HARDWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_TX_HARDWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_TX_HARDWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_SOFTWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_TX_SOFTWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_TX_SOFTWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_HARDWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_RX_HARDWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_RX_HARDWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_SOFTWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SOFTWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_SOFTWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_SOFTWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RAW_HARDWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
+ else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_BIND_PHC"))
+ so_timestamping.flags |= SOF_TIMESTAMPING_BIND_PHC;
else
usage(argv[i]);
}
@@ -385,10 +394,10 @@ int main(int argc, char **argv)
hwtstamp.ifr_data = (void *)&hwconfig;
memset(&hwconfig, 0, sizeof(hwconfig));
hwconfig.tx_type =
- (so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
+ (so_timestamping.flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
hwconfig.rx_filter =
- (so_timestamping_flags & SOF_TIMESTAMPING_RX_HARDWARE) ?
+ (so_timestamping.flags & SOF_TIMESTAMPING_RX_HARDWARE) ?
ptpv2 ? HWTSTAMP_FILTER_PTP_V2_L4_SYNC :
HWTSTAMP_FILTER_PTP_V1_L4_SYNC : HWTSTAMP_FILTER_NONE;
hwconfig_requested = hwconfig;
@@ -413,6 +422,9 @@ int main(int argc, char **argv)
sizeof(struct sockaddr_in)) < 0)
bail("bind");
+ if (setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, interface, if_len))
+ bail("bind device");
+
/* set multicast group for outgoing packets */
inet_aton("224.0.1.130", &iaddr); /* alternate PTP domain 1 */
addr.sin_addr = iaddr;
@@ -444,10 +456,9 @@ int main(int argc, char **argv)
&enabled, sizeof(enabled)) < 0)
bail("setsockopt SO_TIMESTAMPNS");
- if (so_timestamping_flags &&
- setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
- &so_timestamping_flags,
- sizeof(so_timestamping_flags)) < 0)
+ if (so_timestamping.flags &&
+ setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping,
+ sizeof(so_timestamping)) < 0)
bail("setsockopt SO_TIMESTAMPING");
/* request IP_PKTINFO for debugging purposes */
@@ -468,14 +479,18 @@ int main(int argc, char **argv)
else
printf("SO_TIMESTAMPNS %d\n", val);
- if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &val, &len) < 0) {
+ len = sizeof(so_timestamping_get);
+ if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping_get,
+ &len) < 0) {
printf("%s: %s\n", "getsockopt SO_TIMESTAMPING",
strerror(errno));
} else {
- printf("SO_TIMESTAMPING %d\n", val);
- if (val != so_timestamping_flags)
- printf(" not the expected value %d\n",
- so_timestamping_flags);
+ printf("SO_TIMESTAMPING flags %d, bind phc %d\n",
+ so_timestamping_get.flags, so_timestamping_get.bind_phc);
+ if (so_timestamping_get.flags != so_timestamping.flags ||
+ so_timestamping_get.bind_phc != so_timestamping.bind_phc)
+ printf(" not expected, flags %d, bind phc %d\n",
+ so_timestamping.flags, so_timestamping.bind_phc);
}
/* send packets forever every five seconds */
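
The timestamping test now hands SO_TIMESTAMPING a struct so_timestamping (flags plus a PHC index, -1 when unbound) instead of a bare int, matching the new SOF_TIMESTAMPING_BIND_PHC option that ties timestamps to a specific PTP virtual clock. A minimal sketch of the extended sockopt, assuming uapi headers that already carry the new struct:

#include <linux/net_tstamp.h>
#include <sys/socket.h>

int bind_phc_timestamping(int sock, int flags, int phc_index)
{
        struct so_timestamping ts = {
                .flags = flags | SOF_TIMESTAMPING_BIND_PHC,
                .bind_phc = phc_index,  /* PTP vclock index */
        };

        return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
                          &ts, sizeof(ts));
}
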
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index cd6430b39982..8748199ac109 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -5,7 +5,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
- ipip-conntrack-mtu.sh
+ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh
LDLIBS = -lmnl
TEST_GEN_FILES = nf-queue
diff --git a/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh b/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh
new file mode 100755
index 000000000000..e7d7bf13cff5
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Check that UNREPLIED tcp conntrack will eventually timeout.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+waittime=20
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+cleanup() {
+ ip netns pids $ns1 | xargs kill 2>/dev/null
+ ip netns pids $ns2 | xargs kill 2>/dev/null
+
+ ip netns del $ns1
+ ip netns del $ns2
+}
+
+ipv4() {
+ echo -n 192.168.$1.2
+}
+
+check_counter()
+{
+ ns=$1
+ name=$2
+ expect=$3
+ local lret=0
+
+ if ! ip netns exec "$ns" nft list counter inet filter "$name" | grep -q "$expect"; then
+ echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2
+ ip netns exec "$ns" nft list counter inet filter "$name" 1>&2
+ lret=1
+ fi
+
+ return $lret
+}
+
+# Create test namespaces
+ip netns add $ns1 || exit 1
+
+trap cleanup EXIT
+
+ip netns add $ns2 || exit 1
+
+# Connect the namespace to the host using a veth pair
+ip -net $ns1 link add name veth1 type veth peer name veth2
+ip -net $ns1 link set netns $ns2 dev veth2
+
+ip -net $ns1 link set up dev lo
+ip -net $ns2 link set up dev lo
+ip -net $ns1 link set up dev veth1
+ip -net $ns2 link set up dev veth2
+
+ip -net $ns2 addr add 10.11.11.2/24 dev veth2
+ip -net $ns2 route add default via 10.11.11.1
+
+ip netns exec $ns2 sysctl -q net.ipv4.conf.veth2.forwarding=1
+
+# add a rule inside the test namespace so conntrack is enabled
+ip netns exec $ns1 iptables -A INPUT -m state --state established,related -j ACCEPT
+
+ip -net $ns1 addr add 10.11.11.1/24 dev veth1
+ip -net $ns1 route add 10.99.99.99 via 10.11.11.2
+
+# Check connectivity works
+ip netns exec $ns1 ping -q -c 2 10.11.11.2 >/dev/null || exit 1
+
+ip netns exec $ns2 nc -l -p 8080 < /dev/null &
+
+# the connection attempts fail while no NAT rule matches, but conntrack entries are still created for them
+
+ip netns exec $ns2 nft -f - <<EOF
+table inet filter {
+ counter connreq { }
+ counter redir { }
+ chain input {
+ type filter hook input priority 0; policy accept;
+ ct state new tcp flags syn ip daddr 10.99.99.99 tcp dport 80 counter name "connreq" accept
+ ct state new ct status dnat tcp dport 8080 counter name "redir" accept
+ }
+}
+EOF
+if [ $? -ne 0 ]; then
+ echo "ERROR: Could not load nft rules"
+ exit 1
+fi
+
+ip netns exec $ns2 sysctl -q net.netfilter.nf_conntrack_tcp_timeout_syn_sent=10
+
+echo "INFO: connect $ns1 -> $ns2 to the virtual ip"
+ip netns exec $ns1 bash -c 'while true ; do
+ nc -p 60000 10.99.99.99 80
+ sleep 1
+ done' &
+
+sleep 1
+
+ip netns exec $ns2 nft -f - <<EOF
+table inet nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ ip daddr 10.99.99.99 tcp dport 80 redirect to :8080
+ }
+}
+EOF
+if [ $? -ne 0 ]; then
+ echo "ERROR: Could not load nat redirect"
+ exit 1
+fi
+
+count=$(ip netns exec $ns2 conntrack -L -p tcp --dport 80 2>/dev/null | wc -l)
+if [ $count -eq 0 ]; then
+ echo "ERROR: $ns2 did not pick up tcp connection from peer"
+ exit 1
+fi
+
+echo "INFO: NAT redirect added in ns $ns2, waiting for $waittime seconds for nat to take effect"
+for i in $(seq 1 $waittime); do
+ echo -n "."
+
+ sleep 1
+
+ count=$(ip netns exec $ns2 conntrack -L -p tcp --reply-port-src 8080 2>/dev/null | wc -l)
+ if [ $count -gt 0 ]; then
+ echo
+ echo "PASS: redirection took effect after $i seconds"
+ break
+ fi
+
+ m=$((i%20))
+ if [ $m -eq 0 ]; then
+ echo " waited for $i seconds"
+ fi
+done
+
+expect="packets 1 bytes 60"
+check_counter "$ns2" "redir" "$expect"
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+if [ $ret -eq 0 ];then
+ echo "PASS: redirection counter has expected values"
+else
+ echo "ERROR: no tcp connection was redirected"
+fi
+
+exit $ret
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index f08f5e82460b..0be80c213f7f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -186,7 +186,6 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
r = kvm_io_bus_unregister_dev(kvm,
zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
- kvm_iodevice_destructor(&dev->dev);
/*
* On failure, unregister destroys all devices on the
@@ -196,6 +195,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
*/
if (r)
break;
+ kvm_iodevice_destructor(&dev->dev);
}
}
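
The coalesced-MMIO change fixes an ordering bug: when kvm_io_bus_unregister_dev() fails, the bus teardown destroys every device itself, so calling the destructor before checking the return value double-freed the device. The destructor now runs only on the success path, where ownership is still ours. The shape of the fix, with hypothetical names:

struct dev;
int unregister_dev(struct dev *d);      /* on failure, tears down d itself */
void destroy_dev(struct dev *d);

static int remove_dev(struct dev *d)
{
        int r = unregister_dev(d);

        if (r)
                return r;       /* d is already gone on this path */
        destroy_dev(d);         /* success: we still own d, free exactly once */
        return 0;
}
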
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7d95126cda9e..986959833d70 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -935,7 +935,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
stat_data->kvm = kvm;
stat_data->desc = pdesc;
stat_data->kind = KVM_STAT_VCPU;
- kvm->debugfs_stat_data[i] = stat_data;
+ kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm->debugfs_dentry, stat_data,
&stat_fops_per_vm);
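
The kvm_main.c fix is a plain indexing bug: debugfs_stat_data holds the per-VM descriptors first and the per-vCPU descriptors after them, but the vCPU loop wrote to slot i instead of i + kvm_vm_stats_header.num_desc, clobbering the VM entries and leaving the tail of the array unset. The intended layout, sketched with hypothetical helpers:

struct stat_data;
struct stat_data *make_vm_stat(int i);          /* hypothetical */
struct stat_data *make_vcpu_stat(int i);        /* hypothetical */

static void fill_slots(struct stat_data **slot,
                       int num_vm_desc, int num_vcpu_desc)
{
        int i;

        /* VM descriptors occupy slot[0 .. num_vm_desc - 1]. */
        for (i = 0; i < num_vm_desc; i++)
                slot[i] = make_vm_stat(i);
        /* vCPU descriptors follow; writing to slot[i] here (the bug)
         * would overwrite the VM entries and leave the tail NULL. */
        for (i = 0; i < num_vcpu_desc; i++)
                slot[num_vm_desc + i] = make_vcpu_stat(i);
}
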