author    Jakub Kicinski <kuba@kernel.org>  2021-09-16 13:58:38 -0700
committer Jakub Kicinski <kuba@kernel.org>  2021-09-16 13:58:38 -0700
commit    561bed688bffedd6bbdfa70af7a4f64f0a3e2140 (patch)
tree      19256b7a70dcdb9a95c4a7494a0b8bb9db10ef30 /drivers
parent    4b5a3ab17c6c942bd428984b6b37fe3c07f18ab3 (diff)
parent    fc0c0548c1a2e676d3a928aaed70f2d4d254e395 (diff)
download  linux-561bed688bffedd6bbdfa70af7a4f64f0a3e2140.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
No conflicts!

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpi_memhotplug.c | 46
-rw-r--r-- drivers/acpi/cppc_acpi.c | 47
-rw-r--r-- drivers/acpi/prmt.c | 10
-rw-r--r-- drivers/acpi/scan.c | 1
-rw-r--r-- drivers/acpi/x86/s2idle.c | 67
-rw-r--r-- drivers/auxdisplay/cfag12864b.c | 2
-rw-r--r-- drivers/auxdisplay/charlcd.c | 4
-rw-r--r-- drivers/auxdisplay/hd44780.c | 2
-rw-r--r-- drivers/auxdisplay/ks0108.c | 18
-rw-r--r-- drivers/base/arch_numa.c | 2
-rw-r--r-- drivers/base/arch_topology.c | 2
-rw-r--r-- drivers/base/memory.c | 225
-rw-r--r-- drivers/base/node.c | 2
-rw-r--r-- drivers/base/power/main.c | 2
-rw-r--r-- drivers/base/power/wakeirq.c | 11
-rw-r--r-- drivers/block/n64cart.c | 4
-rw-r--r-- drivers/block/virtio_blk.c | 4
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 23
-rw-r--r-- drivers/clk/qcom/gcc-sm6350.c | 4
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 12
-rw-r--r-- drivers/cpufreq/Makefile | 1
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 14
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 4
-rw-r--r-- drivers/cpufreq/cpufreq-dt.c | 3
-rw-r--r-- drivers/cpufreq/cpufreq.c | 17
-rw-r--r-- drivers/cpufreq/imx6q-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 224
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq-hw.c | 308
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq.c | 3
-rw-r--r-- drivers/cpufreq/omap-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-hw.c | 151
-rw-r--r-- drivers/cpufreq/scmi-cpufreq.c | 65
-rw-r--r-- drivers/cpufreq/scpi-cpufreq.c | 3
-rw-r--r-- drivers/cpufreq/sh-cpufreq.c | 11
-rw-r--r-- drivers/cpufreq/vexpress-spc-cpufreq.c | 26
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 4
-rw-r--r-- drivers/cxl/Makefile | 4
-rw-r--r-- drivers/cxl/acpi.c | 12
-rw-r--r-- drivers/cxl/core/Makefile | 8
-rw-r--r-- drivers/cxl/core/bus.c (renamed from drivers/cxl/core.c) | 464
-rw-r--r-- drivers/cxl/core/core.h | 20
-rw-r--r-- drivers/cxl/core/memdev.c | 246
-rw-r--r-- drivers/cxl/core/pmem.c | 230
-rw-r--r-- drivers/cxl/core/regs.c | 249
-rw-r--r-- drivers/cxl/cxl.h | 1
-rw-r--r-- drivers/cxl/cxlmem.h (renamed from drivers/cxl/mem.h) | 35
-rw-r--r-- drivers/cxl/pci.c | 439
-rw-r--r-- drivers/cxl/pci.h | 1
-rw-r--r-- drivers/cxl/pmem.c | 2
-rw-r--r-- drivers/dax/kmem.c | 43
-rw-r--r-- drivers/dax/super.c | 191
-rw-r--r-- drivers/devfreq/devfreq.c | 2
-rw-r--r-- drivers/dma-buf/Kconfig | 4
-rw-r--r-- drivers/dma/Kconfig | 28
-rw-r--r-- drivers/dma/Makefile | 3
-rw-r--r-- drivers/dma/acpi-dma.c | 18
-rw-r--r-- drivers/dma/altera-msgdma.c | 37
-rw-r--r-- drivers/dma/at_xdmac.c | 8
-rw-r--r-- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 56
-rw-r--r-- drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 4
-rw-r--r-- drivers/dma/dw/idma32.c | 138
-rw-r--r-- drivers/dma/dw/internal.h | 16
-rw-r--r-- drivers/dma/dw/of.c | 49
-rw-r--r-- drivers/dma/dw/pci.c | 6
-rw-r--r-- drivers/dma/dw/platform.c | 6
-rw-r--r-- drivers/dma/ep93xx_dma.c | 6
-rw-r--r-- drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 10
-rw-r--r-- drivers/dma/hisi_dma.c | 10
-rw-r--r-- drivers/dma/idxd/Makefile | 8
-rw-r--r-- drivers/dma/idxd/bus.c | 91
-rw-r--r-- drivers/dma/idxd/cdev.c | 73
-rw-r--r-- drivers/dma/idxd/compat.c | 107
-rw-r--r-- drivers/dma/idxd/device.c | 382
-rw-r--r-- drivers/dma/idxd/dma.c | 96
-rw-r--r-- drivers/dma/idxd/idxd.h | 167
-rw-r--r-- drivers/dma/idxd/init.c | 148
-rw-r--r-- drivers/dma/idxd/irq.c | 190
-rw-r--r-- drivers/dma/idxd/registers.h | 6
-rw-r--r-- drivers/dma/idxd/submit.c | 43
-rw-r--r-- drivers/dma/idxd/sysfs.c | 601
-rw-r--r-- drivers/dma/ppc4xx/adma.c | 12
-rw-r--r-- drivers/dma/ptdma/Kconfig | 13
-rw-r--r-- drivers/dma/ptdma/Makefile | 10
-rw-r--r-- drivers/dma/ptdma/ptdma-debugfs.c | 106
-rw-r--r-- drivers/dma/ptdma/ptdma-dev.c | 305
-rw-r--r-- drivers/dma/ptdma/ptdma-dmaengine.c | 389
-rw-r--r-- drivers/dma/ptdma/ptdma-pci.c | 243
-rw-r--r-- drivers/dma/ptdma/ptdma.h | 324
-rw-r--r-- drivers/dma/sh/Kconfig | 9
-rw-r--r-- drivers/dma/sh/Makefile | 1
-rw-r--r-- drivers/dma/sh/rz-dmac.c | 969
-rw-r--r-- drivers/dma/sh/usb-dmac.c | 2
-rw-r--r-- drivers/dma/sprd-dma.c | 1
-rw-r--r-- drivers/dma/stm32-dma.c | 8
-rw-r--r-- drivers/dma/tegra210-adma.c | 7
-rw-r--r-- drivers/dma/ti/k3-psil-j721e.c | 73
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 17
-rw-r--r-- drivers/dma/xilinx/zynqmp_dma.c | 3
-rw-r--r-- drivers/firewire/net.c | 4
-rw-r--r-- drivers/firmware/qcom_scm.c | 58
-rw-r--r-- drivers/firmware/qcom_scm.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 50
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 24
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 17
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 6
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.h | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_pll.c | 1
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 31
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_regs.h | 2
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 6
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 7
-rw-r--r-- drivers/gpu/drm/ttm/ttm_pool.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 2
-rw-r--r-- drivers/hv/ring_buffer.c | 1
-rw-r--r-- drivers/hwmon/mr75203.c | 2
-rw-r--r-- drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 3
-rw-r--r-- drivers/iio/light/as73211.c | 3
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/trace.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 4
-rw-r--r-- drivers/infiniband/hw/qib/qib_sysfs.c | 4
-rw-r--r-- drivers/input/joystick/analog.c | 107
-rw-r--r-- drivers/input/keyboard/Kconfig | 2
-rw-r--r-- drivers/input/keyboard/adc-keys.c | 2
-rw-r--r-- drivers/input/keyboard/adp5588-keys.c | 2
-rw-r--r-- drivers/input/keyboard/adp5589-keys.c | 2
-rw-r--r-- drivers/input/keyboard/ep93xx_keypad.c | 4
-rw-r--r-- drivers/input/misc/Kconfig | 22
-rw-r--r-- drivers/input/misc/Makefile | 2
-rw-r--r-- drivers/input/misc/ixp4xx-beeper.c | 183
-rw-r--r-- drivers/input/misc/pm8941-pwrkey.c | 2
-rw-r--r-- drivers/input/misc/sirfsoc-onkey.c | 207
-rw-r--r-- drivers/input/mouse/elan_i2c.h | 3
-rw-r--r-- drivers/input/mouse/elan_i2c_core.c | 1
-rw-r--r-- drivers/input/serio/parkbd.c | 14
-rw-r--r-- drivers/input/touchscreen/Kconfig | 2
-rw-r--r-- drivers/input/touchscreen/edt-ft5x06.c | 1
-rw-r--r-- drivers/input/touchscreen/mms114.c | 15
-rw-r--r-- drivers/iommu/Kconfig | 2
-rw-r--r-- drivers/iommu/amd/init.c | 48
-rw-r--r-- drivers/iommu/intel/svm.c | 15
-rw-r--r-- drivers/iommu/iova.c | 2
-rw-r--r-- drivers/macintosh/smu.c | 2
-rw-r--r-- drivers/md/dm-table.c | 9
-rw-r--r-- drivers/md/dm.c | 2
-rw-r--r-- drivers/media/i2c/ov02a10.c | 2
-rw-r--r-- drivers/misc/habanalabs/common/Makefile | 3
-rw-r--r-- drivers/misc/habanalabs/common/command_buffer.c | 4
-rw-r--r-- drivers/misc/habanalabs/common/command_submission.c | 1299
-rw-r--r-- drivers/misc/habanalabs/common/context.c | 146
-rw-r--r-- drivers/misc/habanalabs/common/debugfs.c | 184
-rw-r--r-- drivers/misc/habanalabs/common/device.c | 163
-rw-r--r-- drivers/misc/habanalabs/common/firmware_if.c | 56
-rw-r--r-- drivers/misc/habanalabs/common/habanalabs.h | 421
-rw-r--r-- drivers/misc/habanalabs/common/habanalabs_drv.c | 13
-rw-r--r-- drivers/misc/habanalabs/common/habanalabs_ioctl.c | 2
-rw-r--r-- drivers/misc/habanalabs/common/hw_queue.c | 198
-rw-r--r-- drivers/misc/habanalabs/common/memory.c | 169
-rw-r--r-- drivers/misc/habanalabs/common/mmu/mmu_v1.c | 12
-rw-r--r-- drivers/misc/habanalabs/common/pci/pci.c | 2
-rw-r--r-- drivers/misc/habanalabs/common/state_dump.c | 718
-rw-r--r-- drivers/misc/habanalabs/common/sysfs.c | 20
-rw-r--r-- drivers/misc/habanalabs/gaudi/gaudi.c | 716
-rw-r--r-- drivers/misc/habanalabs/gaudi/gaudiP.h | 19
-rw-r--r-- drivers/misc/habanalabs/gaudi/gaudi_coresight.c | 5
-rw-r--r-- drivers/misc/habanalabs/gaudi/gaudi_security.c | 8
-rw-r--r-- drivers/misc/habanalabs/goya/goya.c | 102
-rw-r--r-- drivers/misc/habanalabs/include/common/cpucp_if.h | 115
-rw-r--r-- drivers/misc/habanalabs/include/common/hl_boot_if.h | 62
-rw-r--r-- drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h | 3
-rw-r--r-- drivers/misc/habanalabs/include/gaudi/gaudi_masks.h | 17
-rw-r--r-- drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h | 2
-rw-r--r-- drivers/misc/lkdtm/core.c | 10
-rw-r--r-- drivers/misc/lkdtm/lkdtm.h | 28
-rw-r--r-- drivers/misc/pci_endpoint_test.c | 9
-rw-r--r-- drivers/mtd/nand/raw/intel-nand-controller.c | 2
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 6
-rw-r--r-- drivers/net/dsa/qca8k.c | 30
-rw-r--r-- drivers/net/ethernet/3com/3c515.c | 2
-rw-r--r-- drivers/net/ethernet/8390/ne.c | 22
-rw-r--r-- drivers/net/ethernet/amd/ni65.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 33
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 91
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 88
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 115
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 1
-rw-r--r-- drivers/net/ethernet/cadence/macb_pci.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 85
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 14
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 19
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 6
-rw-r--r-- drivers/net/ethernet/i825xx/82596.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 16
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_idc.c | 6
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 7
-rw-r--r-- drivers/net/ethernet/microsoft/mana/hw_channel.c | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2
-rw-r--r-- drivers/net/ethernet/rdc/r6040.c | 9
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 78
-rw-r--r-- drivers/net/ethernet/sfc/efx_channels.c | 106
-rw-r--r-- drivers/net/ethernet/sfc/falcon/efx.c | 79
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 8
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 29
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 44
-rw-r--r-- drivers/net/hamradio/6pack.c | 4
-rw-r--r-- drivers/net/ipa/ipa_table.c | 3
-rw-r--r-- drivers/net/phy/dp83640_reg.h | 2
-rw-r--r-- drivers/net/phy/phy_device.c | 4
-rw-r--r-- drivers/net/phy/phylink.c | 30
-rw-r--r-- drivers/net/wan/Makefile | 2
-rw-r--r-- drivers/nvdimm/label.c | 256
-rw-r--r-- drivers/nvdimm/label.h | 1
-rw-r--r-- drivers/nvdimm/namespace_devs.c | 113
-rw-r--r-- drivers/nvdimm/nd.h | 150
-rw-r--r-- drivers/nvdimm/pmem.c | 4
-rw-r--r-- drivers/nvme/host/core.c | 68
-rw-r--r-- drivers/nvme/host/multipath.c | 19
-rw-r--r-- drivers/nvme/host/nvme.h | 10
-rw-r--r-- drivers/nvme/host/tcp.c | 22
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 2
-rw-r--r-- drivers/nvme/target/configfs.c | 5
-rw-r--r-- drivers/nvme/target/core.c | 10
-rw-r--r-- drivers/nvme/target/nvmet.h | 11
-rw-r--r-- drivers/nvme/target/passthru.c | 14
-rw-r--r-- drivers/of/property.c | 3
-rw-r--r-- drivers/parisc/dino.c | 18
-rw-r--r-- drivers/pci/ats.c | 2
-rw-r--r-- drivers/pci/controller/Kconfig | 1
-rw-r--r-- drivers/pci/controller/cadence/pci-j721e.c | 61
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-ep.c | 200
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-host.c | 3
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.c | 16
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.h | 29
-rw-r--r-- drivers/pci/controller/dwc/Kconfig | 48
-rw-r--r-- drivers/pci/controller/dwc/Makefile | 3
-rw-r--r-- drivers/pci/controller/dwc/pci-dra7xx.c | 16
-rw-r--r-- drivers/pci/controller/dwc/pci-keystone.c | 14
-rw-r--r-- drivers/pci/controller/dwc/pcie-artpec6.c | 7
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c | 36
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 9
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-plat.c | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-dw-rockchip.c | 279
-rw-r--r-- drivers/pci/controller/dwc/pcie-keembay.c | 460
-rw-r--r-- drivers/pci/controller/dwc/pcie-tegra194.c | 54
-rw-r--r-- drivers/pci/controller/dwc/pcie-uniphier.c | 8
-rw-r--r-- drivers/pci/controller/dwc/pcie-visconti.c | 332
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 15
-rw-r--r-- drivers/pci/controller/pci-aardvark.c | 334
-rw-r--r-- drivers/pci/controller/pci-ftpci100.c | 2
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 153
-rw-r--r-- drivers/pci/controller/pci-tegra.c | 38
-rw-r--r-- drivers/pci/controller/pci-xgene-msi.c | 10
-rw-r--r-- drivers/pci/controller/pcie-altera-msi.c | 10
-rw-r--r-- drivers/pci/controller/pcie-altera.c | 10
-rw-r--r-- drivers/pci/controller/pcie-brcmstb.c | 9
-rw-r--r-- drivers/pci/controller/pcie-iproc-bcma.c | 16
-rw-r--r-- drivers/pci/controller/pcie-iproc-msi.c | 4
-rw-r--r-- drivers/pci/controller/pcie-mediatek-gen3.c | 13
-rw-r--r-- drivers/pci/controller/pcie-mediatek.c | 64
-rw-r--r-- drivers/pci/controller/pcie-microchip-host.c | 18
-rw-r--r-- drivers/pci/controller/pcie-rcar-ep.c | 23
-rw-r--r-- drivers/pci/controller/pcie-rcar-host.c | 94
-rw-r--r-- drivers/pci/controller/pcie-rcar.h | 7
-rw-r--r-- drivers/pci/controller/pcie-rockchip-ep.c | 18
-rw-r--r-- drivers/pci/controller/pcie-rockchip-host.c | 8
-rw-r--r-- drivers/pci/controller/pcie-xilinx-cpm.c | 4
-rw-r--r-- drivers/pci/controller/pcie-xilinx-nwl.c | 25
-rw-r--r-- drivers/pci/controller/pcie-xilinx.c | 9
-rw-r--r-- drivers/pci/controller/vmd.c | 55
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-ntb.c | 89
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-test.c | 74
-rw-r--r-- drivers/pci/endpoint/pci-ep-cfs.c | 24
-rw-r--r-- drivers/pci/endpoint/pci-epc-core.c | 134
-rw-r--r-- drivers/pci/endpoint/pci-epf-core.c | 146
-rw-r--r-- drivers/pci/host-bridge.c | 1
-rw-r--r-- drivers/pci/hotplug/TODO | 3
-rw-r--r-- drivers/pci/hotplug/ibmphp_ebda.c | 5
-rw-r--r-- drivers/pci/hotplug/pciehp.h | 2
-rw-r--r-- drivers/pci/hotplug/pciehp_hpc.c | 2
-rw-r--r-- drivers/pci/hotplug/pnv_php.c | 2
-rw-r--r-- drivers/pci/of.c | 2
-rw-r--r-- drivers/pci/pci-acpi.c | 159
-rw-r--r-- drivers/pci/pci-bridge-emul.h | 2
-rw-r--r-- drivers/pci/pci-sysfs.c | 3
-rw-r--r-- drivers/pci/pci.c | 331
-rw-r--r-- drivers/pci/pci.h | 47
-rw-r--r-- drivers/pci/pcie/aer.c | 12
-rw-r--r-- drivers/pci/pcie/portdrv_core.c | 9
-rw-r--r-- drivers/pci/pcie/ptm.c | 4
-rw-r--r-- drivers/pci/probe.c | 29
-rw-r--r-- drivers/pci/proc.c | 1
-rw-r--r-- drivers/pci/quirks.c | 128
-rw-r--r-- drivers/pci/remove.c | 1
-rw-r--r-- drivers/pci/syscall.c | 7
-rw-r--r-- drivers/pci/vpd.c | 490
-rw-r--r-- drivers/phy/st/phy-stm32-usbphyc.c | 2
-rw-r--r-- drivers/platform/chrome/Makefile | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_proto.c | 9
-rw-r--r-- drivers/platform/chrome/cros_ec_sensorhub_ring.c | 14
-rw-r--r-- drivers/platform/chrome/cros_ec_trace.h | 94
-rw-r--r-- drivers/platform/chrome/cros_ec_typec.c | 27
-rw-r--r-- drivers/pwm/Kconfig | 5
-rw-r--r-- drivers/pwm/core.c | 4
-rw-r--r-- drivers/pwm/pwm-ab8500.c | 35
-rw-r--r-- drivers/pwm/pwm-atmel-hlcdc.c | 5
-rw-r--r-- drivers/pwm/pwm-atmel-tcb.c | 5
-rw-r--r-- drivers/pwm/pwm-atmel.c | 102
-rw-r--r-- drivers/pwm/pwm-bcm-kona.c | 12
-rw-r--r-- drivers/pwm/pwm-brcmstb.c | 5
-rw-r--r-- drivers/pwm/pwm-cros-ec.c | 4
-rw-r--r-- drivers/pwm/pwm-ep93xx.c | 11
-rw-r--r-- drivers/pwm/pwm-fsl-ftm.c | 10
-rw-r--r-- drivers/pwm/pwm-hibvt.c | 4
-rw-r--r-- drivers/pwm/pwm-img.c | 20
-rw-r--r-- drivers/pwm/pwm-imx-tpm.c | 5
-rw-r--r-- drivers/pwm/pwm-imx27.c | 14
-rw-r--r-- drivers/pwm/pwm-intel-lgm.c | 12
-rw-r--r-- drivers/pwm/pwm-iqs620a.c | 16
-rw-r--r-- drivers/pwm/pwm-jz4740.c | 12
-rw-r--r-- drivers/pwm/pwm-keembay.c | 12
-rw-r--r-- drivers/pwm/pwm-lp3943.c | 12
-rw-r--r-- drivers/pwm/pwm-lpc32xx.c | 22
-rw-r--r-- drivers/pwm/pwm-mediatek.c | 12
-rw-r--r-- drivers/pwm/pwm-mtk-disp.c | 174
-rw-r--r-- drivers/pwm/pwm-mxs.c | 25
-rw-r--r-- drivers/pwm/pwm-ntxec.c | 14
-rw-r--r-- drivers/pwm/pwm-omap-dmtimer.c | 5
-rw-r--r-- drivers/pwm/pwm-pca9685.c | 5
-rw-r--r-- drivers/pwm/pwm-pxa.c | 13
-rw-r--r-- drivers/pwm/pwm-raspberrypi-poe.c | 12
-rw-r--r-- drivers/pwm/pwm-rcar.c | 5
-rw-r--r-- drivers/pwm/pwm-renesas-tpu.c | 5
-rw-r--r-- drivers/pwm/pwm-rockchip.c | 16
-rw-r--r-- drivers/pwm/pwm-samsung.c | 5
-rw-r--r-- drivers/pwm/pwm-sifive.c | 6
-rw-r--r-- drivers/pwm/pwm-sl28cpld.c | 12
-rw-r--r-- drivers/pwm/pwm-stm32-lp.c | 12
-rw-r--r-- drivers/pwm/pwm-sun4i.c | 5
-rw-r--r-- drivers/pwm/pwm-tiecap.c | 6
-rw-r--r-- drivers/pwm/pwm-tiehrpwm.c | 4
-rw-r--r-- drivers/pwm/pwm-twl-led.c | 17
-rw-r--r-- drivers/pwm/pwm-twl.c | 17
-rw-r--r-- drivers/rtc/Kconfig | 10
-rw-r--r-- drivers/rtc/Makefile | 2
-rw-r--r-- drivers/rtc/lib.c | 107
-rw-r--r-- drivers/rtc/lib_test.c | 81
-rw-r--r-- drivers/rtc/rtc-cmos.c | 10
-rw-r--r-- drivers/rtc/rtc-rx8025.c | 46
-rw-r--r-- drivers/rtc/rtc-s5m.c | 48
-rw-r--r-- drivers/rtc/rtc-tps65910.c | 2
-rw-r--r-- drivers/s390/block/Kconfig | 11
-rw-r--r-- drivers/s390/block/Makefile | 1
-rw-r--r-- drivers/s390/block/xpram.c | 416
-rw-r--r-- drivers/s390/char/con3270.c | 7
-rw-r--r-- drivers/s390/char/ctrlchar.c | 11
-rw-r--r-- drivers/s390/char/hmcdrv_ftp.c | 2
-rw-r--r-- drivers/s390/char/sclp.c | 2
-rw-r--r-- drivers/s390/cio/blacklist.c | 5
-rw-r--r-- drivers/s390/cio/device.c | 21
-rw-r--r-- drivers/s390/cio/device_id.c | 2
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 14
-rw-r--r-- drivers/s390/crypto/zcrypt_cex2a.c | 8
-rw-r--r-- drivers/s390/crypto/zcrypt_cex2c.c | 10
-rw-r--r-- drivers/s390/crypto/zcrypt_cex4.c | 8
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype50.c | 18
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype6.c | 38
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_unit.c | 4
-rw-r--r-- drivers/scsi/cxlflash/main.c | 34
-rw-r--r-- drivers/thermal/devfreq_cooling.c | 2
-rw-r--r-- drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 14
-rw-r--r-- drivers/thermal/intel/intel_powerclamp.c | 4
-rw-r--r-- drivers/thermal/intel/intel_tcc_cooling.c | 2
-rw-r--r-- drivers/thermal/qcom/Kconfig | 10
-rw-r--r-- drivers/thermal/qcom/Makefile | 1
-rw-r--r-- drivers/thermal/qcom/lmh.c | 232
-rw-r--r-- drivers/thermal/qcom/qcom-spmi-adc-tm5.c | 6
-rw-r--r-- drivers/thermal/rcar_gen3_thermal.c | 110
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.c | 1
-rw-r--r-- drivers/thermal/tegra/Kconfig | 9
-rw-r--r-- drivers/thermal/tegra/Makefile | 1
-rw-r--r-- drivers/thermal/tegra/soctherm.c | 4
-rw-r--r-- drivers/thermal/tegra/tegra30-tsensor.c | 673
-rw-r--r-- drivers/vdpa/Kconfig | 11
-rw-r--r-- drivers/vdpa/Makefile | 1
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_base.c | 8
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_base.h | 25
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_main.c | 249
-rw-r--r-- drivers/vdpa/mlx5/core/mlx5_vdpa.h | 26
-rw-r--r-- drivers/vdpa/mlx5/core/mr.c | 81
-rw-r--r-- drivers/vdpa/mlx5/core/resources.c | 35
-rw-r--r-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 555
-rw-r--r-- drivers/vdpa/vdpa.c | 9
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim.c | 29
-rw-r--r-- drivers/vdpa/vdpa_user/Makefile | 5
-rw-r--r-- drivers/vdpa/vdpa_user/iova_domain.c | 545
-rw-r--r-- drivers/vdpa/vdpa_user/iova_domain.h | 73
-rw-r--r-- drivers/vdpa/vdpa_user/vduse_dev.c | 1641
-rw-r--r-- drivers/vdpa/virtio_pci/vp_vdpa.c | 17
-rw-r--r-- drivers/vhost/iotlb.c | 20
-rw-r--r-- drivers/vhost/net.c | 11
-rw-r--r-- drivers/vhost/scsi.c | 14
-rw-r--r-- drivers/vhost/vdpa.c | 188
-rw-r--r-- drivers/vhost/vsock.c | 28
-rw-r--r-- drivers/video/fbdev/core/fbmem.c | 6
-rw-r--r-- drivers/virtio/virtio.c | 56
-rw-r--r-- drivers/virtio/virtio_balloon.c | 4
-rw-r--r-- drivers/virtio/virtio_mem.c | 26
458 files changed, 19523 insertions, 6639 deletions
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 8cc195c4c861..24f662d8bd39 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -54,6 +54,7 @@ struct acpi_memory_info {
struct acpi_memory_device {
struct acpi_device *device;
struct list_head res_list;
+ int mgid;
};
static acpi_status
@@ -169,12 +170,33 @@ static void acpi_unbind_memory_blocks(struct acpi_memory_info *info)
static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
{
acpi_handle handle = mem_device->device->handle;
+ mhp_t mhp_flags = MHP_NID_IS_MGID;
int result, num_enabled = 0;
struct acpi_memory_info *info;
- mhp_t mhp_flags = MHP_NONE;
- int node;
+ u64 total_length = 0;
+ int node, mgid;
node = acpi_get_node(handle);
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ if (!info->length)
+ continue;
+ /* We want a single node for the whole memory group */
+ if (node < 0)
+ node = memory_add_physaddr_to_nid(info->start_addr);
+ total_length += info->length;
+ }
+
+ if (!total_length) {
+ dev_err(&mem_device->device->dev, "device is empty\n");
+ return -EINVAL;
+ }
+
+ mgid = memory_group_register_static(node, PFN_UP(total_length));
+ if (mgid < 0)
+ return mgid;
+ mem_device->mgid = mgid;
+
/*
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success
@@ -182,22 +204,16 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
* (i.e. memory-hot-remove function)
*/
list_for_each_entry(info, &mem_device->res_list, list) {
- if (info->enabled) { /* just sanity check...*/
- num_enabled++;
- continue;
- }
/*
* If the memory block size is zero, please ignore it.
* Don't try to do the following memory hotplug flowchart.
*/
if (!info->length)
continue;
- if (node < 0)
- node = memory_add_physaddr_to_nid(info->start_addr);
if (mhp_supports_memmap_on_memory(info->length))
mhp_flags |= MHP_MEMMAP_ON_MEMORY;
- result = __add_memory(node, info->start_addr, info->length,
+ result = __add_memory(mgid, info->start_addr, info->length,
mhp_flags);
/*
@@ -239,19 +255,14 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
- acpi_handle handle = mem_device->device->handle;
struct acpi_memory_info *info, *n;
- int nid = acpi_get_node(handle);
list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
if (!info->enabled)
continue;
- if (nid == NUMA_NO_NODE)
- nid = memory_add_physaddr_to_nid(info->start_addr);
-
acpi_unbind_memory_blocks(info);
- __remove_memory(nid, info->start_addr, info->length);
+ __remove_memory(info->start_addr, info->length);
list_del(&info->list);
kfree(info);
}
@@ -262,6 +273,10 @@ static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
if (!mem_device)
return;
+ /* In case we succeeded adding *some* memory, unregistering fails. */
+ if (mem_device->mgid >= 0)
+ memory_group_unregister(mem_device->mgid);
+
acpi_memory_free_device_resources(mem_device);
mem_device->device->driver_data = NULL;
kfree(mem_device);
@@ -282,6 +297,7 @@ static int acpi_memory_device_add(struct acpi_device *device,
INIT_LIST_HEAD(&mem_device->res_list);
mem_device->device = device;
+ mem_device->mgid = -1;
sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
device->driver_data = mem_device;
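
[Editor's note] In short, the driver now registers one static memory group per ACPI memory device, passes the group id rather than a node id to __add_memory() (signalled by the MHP_NID_IS_MGID flag), and unregisters the group only once all its blocks are gone. A condensed sketch of that pattern; example_add_device_memory() is a hypothetical name and error handling is trimmed:

static int example_add_device_memory(int nid, u64 start, u64 size)
{
	mhp_t mhp_flags = MHP_NID_IS_MGID;	/* first __add_memory() arg is a group id */
	int mgid, ret;

	/* one group per device; max_pages bounds the memory it may span */
	mgid = memory_group_register_static(nid, PFN_UP(size));
	if (mgid < 0)
		return mgid;

	ret = __add_memory(mgid, start, size, mhp_flags);
	if (ret)
		memory_group_unregister(mgid);	/* fails with -EBUSY once blocks exist */
	return ret;
}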
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a4d4eebba1da..bd482108310c 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1008,23 +1008,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return ret_val;
}
-/**
- * cppc_get_desired_perf - Get the value of desired performance register.
- * @cpunum: CPU from which to get desired performance.
- * @desired_perf: address of a variable to store the returned desired performance
- *
- * Return: 0 for success, -EIO otherwise.
- */
-int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cpc_register_resource *desired_reg;
- struct cppc_pcc_data *pcc_ss_data = NULL;
-
- desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+ struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
- if (CPC_IN_PCC(desired_reg)) {
+ if (CPC_IN_PCC(reg)) {
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0;
if (pcc_ss_id < 0)
@@ -1035,7 +1026,7 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
down_write(&pcc_ss_data->pcc_lock);
if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
- cpc_read(cpunum, desired_reg, desired_perf);
+ cpc_read(cpunum, reg, perf);
else
ret = -EIO;
@@ -1044,13 +1035,37 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
return ret;
}
- cpc_read(cpunum, desired_reg, desired_perf);
+ cpc_read(cpunum, reg, perf);
return 0;
}
+
+/**
+ * cppc_get_desired_perf - Get the desired performance register value.
+ * @cpunum: CPU from which to get desired performance.
+ * @desired_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+ return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
+}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
/**
+ * cppc_get_nominal_perf - Get the nominal performance register value.
+ * @cpunum: CPU from which to get nominal performance.
+ * @nominal_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
+{
+ return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
+}
+
+/**
* cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
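
[Editor's note] With the shared cppc_get_perf() helper above, each exported accessor reduces to a one-line wrapper. A minimal, hypothetical caller (example_report_perf() and its pr_info() reporting are illustrative, not part of the patch):

static int example_report_perf(int cpu)
{
	u64 desired, nominal;
	int ret;

	ret = cppc_get_desired_perf(cpu, &desired);
	if (!ret)
		ret = cppc_get_nominal_perf(cpu, &nominal);
	if (ret)
		return ret;	/* -EIO when the PCC-backed register read fails */

	pr_info("cpu%d: desired=%llu nominal=%llu\n", cpu, desired, nominal);
	return 0;
}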
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 1f6007abcf18..89c22bc55057 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -288,10 +288,18 @@ invalid_guid:
void __init init_prmt(void)
{
+ struct acpi_table_header *tbl;
acpi_status status;
- int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
+ int mc;
+
+ status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);
+ if (ACPI_FAILURE(status))
+ return;
+
+ mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
sizeof (struct acpi_table_prmt_header),
0, acpi_parse_prmt, 0);
+ acpi_put_table(tbl);
/*
* Return immediately if PRMT table is not present or no PRM module found.
*/
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b24513ec3fae..5b54c80b9d32 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -16,7 +16,6 @@
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
-#include <linux/nls.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 3a308461246a..bd92b549fd5a 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -449,25 +449,30 @@ int acpi_s2idle_prepare_late(void)
if (pm_debug_messages_on)
lpi_check_constraints();
- if (lps0_dsm_func_mask_microsoft > 0) {
+ /* Screen off */
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_SCREEN_OFF_AMD :
+ ACPI_LPS0_SCREEN_OFF,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+
+ /* LPS0 entry */
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_ENTRY_AMD :
+ ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- } else if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- } else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
- lps0_dsm_func_mask, lps0_dsm_guid);
+ /* modern standby entry */
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
}
-
return 0;
}
@@ -476,24 +481,30 @@ void acpi_s2idle_restore_early(void)
if (!lps0_device_handle || sleep_no_lps0)
return;
- if (lps0_dsm_func_mask_microsoft > 0) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ /* Modern standby exit */
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- } else if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- } else {
+
+ /* LPS0 exit */
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_EXIT_AMD :
+ ACPI_LPS0_EXIT,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
- lps0_dsm_func_mask, lps0_dsm_guid);
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+
+ /* Screen on */
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
- lps0_dsm_func_mask, lps0_dsm_guid);
- }
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_SCREEN_ON_AMD :
+ ACPI_LPS0_SCREEN_ON,
+ lps0_dsm_func_mask, lps0_dsm_guid);
}
static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index fd430e6866a1..6526aa51fb1d 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -33,7 +33,7 @@
*/
static unsigned int cfag12864b_rate = CONFIG_CFAG12864B_RATE;
-module_param(cfag12864b_rate, uint, S_IRUGO);
+module_param(cfag12864b_rate, uint, 0444);
MODULE_PARM_DESC(cfag12864b_rate,
"Refresh rate (hertz)");
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 24fd6f369ebe..304accde365c 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -637,9 +637,7 @@ static int panel_notify_sys(struct notifier_block *this, unsigned long code,
}
static struct notifier_block panel_notifier = {
- panel_notify_sys,
- NULL,
- 0
+ .notifier_call = panel_notify_sys,
};
int charlcd_register(struct charlcd *lcd)
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 2e5e7c993933..8b2a0eb3f32a 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -323,8 +323,8 @@ static int hd44780_remove(struct platform_device *pdev)
{
struct charlcd *lcd = platform_get_drvdata(pdev);
- kfree(lcd->drvdata);
charlcd_unregister(lcd);
+ kfree(lcd->drvdata);
kfree(lcd);
return 0;
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 03c95ad4216c..e871b94a1911 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -28,11 +28,11 @@
*/
static unsigned int ks0108_port = CONFIG_KS0108_PORT;
-module_param(ks0108_port, uint, S_IRUGO);
+module_param(ks0108_port, uint, 0444);
MODULE_PARM_DESC(ks0108_port, "Parallel port where the LCD is connected");
static unsigned int ks0108_delay = CONFIG_KS0108_DELAY;
-module_param(ks0108_delay, uint, S_IRUGO);
+module_param(ks0108_delay, uint, 0444);
MODULE_PARM_DESC(ks0108_delay, "Delay between each control writing (microseconds)");
/*
@@ -167,19 +167,7 @@ static struct parport_driver ks0108_parport_driver = {
.detach = ks0108_parport_detach,
.devmodel = true,
};
-
-static int __init ks0108_init(void)
-{
- return parport_register_driver(&ks0108_parport_driver);
-}
-
-static void __exit ks0108_exit(void)
-{
- parport_unregister_driver(&ks0108_parport_driver);
-}
-
-module_init(ks0108_init);
-module_exit(ks0108_exit);
+module_parport_driver(ks0108_parport_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Miguel Ojeda <ojeda@kernel.org>");
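
[Editor's note] module_parport_driver() generates the same init/exit boilerplate this hunk deletes; as a rough sketch of its expansion (it is built on the generic module_driver() helper), the macro is equivalent to:

/* approximate expansion of module_parport_driver(ks0108_parport_driver) */
static int __init ks0108_parport_driver_init(void)
{
	return parport_register_driver(&ks0108_parport_driver);
}
module_init(ks0108_parport_driver_init);

static void __exit ks0108_parport_driver_exit(void)
{
	parport_unregister_driver(&ks0108_parport_driver);
}
module_exit(ks0108_parport_driver_exit);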
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index 46c503486e96..00fb4120a5b3 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -264,7 +264,7 @@ void __init numa_free_distance(void)
size = numa_distance_cnt * numa_distance_cnt *
sizeof(numa_distance[0]);
- memblock_free(__pa(numa_distance), size);
+ memblock_free_ptr(numa_distance, size);
numa_distance_cnt = 0;
numa_distance = NULL;
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 921312a8d957..43407665918f 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -149,6 +149,7 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
}
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
@@ -165,6 +166,7 @@ void topology_set_thermal_pressure(const struct cpumask *cpus,
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
+EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index e3fd2dbf4eea..365cd4a7f239 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -82,6 +82,12 @@ static struct bus_type memory_subsys = {
*/
static DEFINE_XARRAY(memory_blocks);
+/*
+ * Memory groups, indexed by memory group id (mgid).
+ */
+static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
+#define MEMORY_GROUP_MARK_DYNAMIC XA_MARK_1
+
static BLOCKING_NOTIFIER_HEAD(memory_chain);
int register_memory_notifier(struct notifier_block *nb)
@@ -177,7 +183,8 @@ static int memory_block_online(struct memory_block *mem)
struct zone *zone;
int ret;
- zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages);
+ zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
+ start_pfn, nr_pages);
/*
* Although vmemmap pages have a different lifecycle than the pages
@@ -193,7 +200,7 @@ static int memory_block_online(struct memory_block *mem)
}
ret = online_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages, zone);
+ nr_pages - nr_vmemmap_pages, zone, mem->group);
if (ret) {
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
@@ -205,7 +212,8 @@ static int memory_block_online(struct memory_block *mem)
* now already properly populated.
*/
if (nr_vmemmap_pages)
- adjust_present_page_count(zone, nr_vmemmap_pages);
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ nr_vmemmap_pages);
return ret;
}
@@ -215,24 +223,23 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
- struct zone *zone;
int ret;
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
*/
- if (nr_vmemmap_pages) {
- zone = page_zone(pfn_to_page(start_pfn));
- adjust_present_page_count(zone, -nr_vmemmap_pages);
- }
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ -nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages);
+ nr_pages - nr_vmemmap_pages, mem->group);
if (ret) {
/* offline_pages() failed. Account back. */
if (nr_vmemmap_pages)
- adjust_present_page_count(zone, nr_vmemmap_pages);
+ adjust_present_page_count(pfn_to_page(start_pfn),
+ mem->group, nr_vmemmap_pages);
return ret;
}
@@ -374,12 +381,13 @@ static ssize_t phys_device_show(struct device *dev,
#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
+ struct memory_group *group,
unsigned long start_pfn, unsigned long nr_pages,
int online_type, struct zone *default_zone)
{
struct zone *zone;
- zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
+ zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
if (zone == default_zone)
return 0;
@@ -392,9 +400,10 @@ static ssize_t valid_zones_show(struct device *dev,
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct memory_group *group = mem->group;
struct zone *default_zone;
+ int nid = mem->nid;
int len = 0;
- int nid;
/*
* Check the existing zone. Make sure that we do that only on the
@@ -413,14 +422,13 @@ static ssize_t valid_zones_show(struct device *dev,
goto out;
}
- nid = mem->nid;
- default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
- nr_pages);
+ default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
+ start_pfn, nr_pages);
len += sysfs_emit_at(buf, len, "%s", default_zone->name);
- len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_KERNEL, default_zone);
- len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_MOVABLE, default_zone);
out:
len += sysfs_emit_at(buf, len, "\n");
@@ -634,7 +642,8 @@ int register_memory(struct memory_block *memory)
}
static int init_memory_block(unsigned long block_id, unsigned long state,
- unsigned long nr_vmemmap_pages)
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
{
struct memory_block *mem;
int ret = 0;
@@ -652,6 +661,12 @@ static int init_memory_block(unsigned long block_id, unsigned long state,
mem->state = state;
mem->nid = NUMA_NO_NODE;
mem->nr_vmemmap_pages = nr_vmemmap_pages;
+ INIT_LIST_HEAD(&mem->group_next);
+
+ if (group) {
+ mem->group = group;
+ list_add(&mem->group_next, &group->memory_blocks);
+ }
ret = register_memory(mem);
@@ -671,7 +686,7 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
return init_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, 0);
+ MEM_ONLINE, 0, NULL);
}
static void unregister_memory(struct memory_block *memory)
@@ -681,6 +696,11 @@ static void unregister_memory(struct memory_block *memory)
WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);
+ if (memory->group) {
+ list_del(&memory->group_next);
+ memory->group = NULL;
+ }
+
/* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
@@ -694,7 +714,8 @@ static void unregister_memory(struct memory_block *memory)
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
- unsigned long vmemmap_pages)
+ unsigned long vmemmap_pages,
+ struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
@@ -707,7 +728,8 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages);
+ ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages,
+ group);
if (ret)
break;
}
@@ -891,3 +913,164 @@ int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
for_each_memory_block_cb);
}
+
+/*
+ * This is an internal helper to unify allocation and initialization of
+ * memory groups. Note that the passed memory group will be copied to a
+ * dynamically allocated memory group. After this call, the passed
+ * memory group should no longer be used.
+ */
+static int memory_group_register(struct memory_group group)
+{
+ struct memory_group *new_group;
+ uint32_t mgid;
+ int ret;
+
+ if (!node_possible(group.nid))
+ return -EINVAL;
+
+ new_group = kzalloc(sizeof(group), GFP_KERNEL);
+ if (!new_group)
+ return -ENOMEM;
+ *new_group = group;
+ INIT_LIST_HEAD(&new_group->memory_blocks);
+
+ ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(new_group);
+ return ret;
+ } else if (group.is_dynamic) {
+ xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
+ }
+ return mgid;
+}
+
+/**
+ * memory_group_register_static() - Register a static memory group.
+ * @nid: The node id.
+ * @max_pages: The maximum number of pages we'll have in this static memory
+ * group.
+ *
+ * Register a new static memory group and return the memory group id.
+ * All memory in the group belongs to a single unit, such as a DIMM. All
+ * memory belonging to a static memory group is added in one go to be removed
+ * in one go -- it's static.
+ *
+ * Returns an error if out of memory, if the node id is invalid, if no new
+ * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
+ * returns the new memory group id.
+ */
+int memory_group_register_static(int nid, unsigned long max_pages)
+{
+ struct memory_group group = {
+ .nid = nid,
+ .s = {
+ .max_pages = max_pages,
+ },
+ };
+
+ if (!max_pages)
+ return -EINVAL;
+ return memory_group_register(group);
+}
+EXPORT_SYMBOL_GPL(memory_group_register_static);
+
+/**
+ * memory_group_register_dynamic() - Register a dynamic memory group.
+ * @nid: The node id.
+ * @unit_pages: Unit in pages in which is memory added/removed in this dynamic
+ * memory group.
+ *
+ * Register a new dynamic memory group and return the memory group id.
+ * Memory within a dynamic memory group is added/removed dynamically
+ * in unit_pages.
+ *
+ * Returns an error if out of memory, if the node id is invalid, if no new
+ * memory groups can be registered, or if unit_pages is invalid (0, not a
+ * power of two, smaller than a single memory block). Otherwise, returns the
+ * new memory group id.
+ */
+int memory_group_register_dynamic(int nid, unsigned long unit_pages)
+{
+ struct memory_group group = {
+ .nid = nid,
+ .is_dynamic = true,
+ .d = {
+ .unit_pages = unit_pages,
+ },
+ };
+
+ if (!unit_pages || !is_power_of_2(unit_pages) ||
+ unit_pages < PHYS_PFN(memory_block_size_bytes()))
+ return -EINVAL;
+ return memory_group_register(group);
+}
+EXPORT_SYMBOL_GPL(memory_group_register_dynamic);
+
+/**
+ * memory_group_unregister() - Unregister a memory group.
+ * @mgid: the memory group id
+ *
+ * Unregister a memory group. If any memory block still belongs to this
+ * memory group, unregistering will fail.
+ *
+ * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
+ * memory blocks still belong to this memory group and returns 0 if
+ * unregistering succeeded.
+ */
+int memory_group_unregister(int mgid)
+{
+ struct memory_group *group;
+
+ if (mgid < 0)
+ return -EINVAL;
+
+ group = xa_load(&memory_groups, mgid);
+ if (!group)
+ return -EINVAL;
+ if (!list_empty(&group->memory_blocks))
+ return -EBUSY;
+ xa_erase(&memory_groups, mgid);
+ kfree(group);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(memory_group_unregister);
+
+/*
+ * This is an internal helper only to be used in core memory hotplug code to
+ * lookup a memory group. We don't care about locking, as we don't expect a
+ * memory group to get unregistered while adding memory to it -- because
+ * the group and the memory is managed by the same driver.
+ */
+struct memory_group *memory_group_find_by_id(int mgid)
+{
+ return xa_load(&memory_groups, mgid);
+}
+
+/*
+ * This is an internal helper only to be used in core memory hotplug code to
+ * walk all dynamic memory groups excluding a given memory group, either
+ * belonging to a specific node, or belonging to any node.
+ */
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg)
+{
+ struct memory_group *group;
+ unsigned long index;
+ int ret = 0;
+
+ xa_for_each_marked(&memory_groups, index, group,
+ MEMORY_GROUP_MARK_DYNAMIC) {
+ if (group == excluded)
+ continue;
+#ifdef CONFIG_NUMA
+ if (nid != NUMA_NO_NODE && group->nid != nid)
+ continue;
+#endif /* CONFIG_NUMA */
+ ret = func(group, arg);
+ if (ret)
+ break;
+ }
+ return ret;
+}
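
[Editor's note] The dynamic variant follows the same shape as the static one; a sketch under the constraints the kernel-doc above spells out (the helper name is hypothetical, and the unit size shown, one memory block, is the smallest the API accepts):

static int example_register_dynamic_group(int nid)
{
	/* unit_pages must be a power of two and at least one memory block */
	unsigned long unit_pages = PHYS_PFN(memory_block_size_bytes());

	/* negative errno on failure, otherwise the new group id */
	return memory_group_register_dynamic(nid, unit_pages);
}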
diff --git a/drivers/base/node.c b/drivers/base/node.c
index be16bbff11cc..c56d34f8158f 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -785,8 +785,6 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __ref get_nid_for_pfn(unsigned long pfn)
{
- if (!pfn_valid_within(pfn))
- return -1;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
if (system_state < SYSTEM_RUNNING)
return early_pfn_to_nid(pfn);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index d568772152c2..cbea78e79f3d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1642,7 +1642,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
dev->power.may_skip_resume = true;
- dev->power.must_resume = false;
+ dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 3bad3266a2ad..b91a3a9bf9f6 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -12,14 +12,11 @@
/**
* dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
* @dev: Device entry
- * @irq: Device wake-up capable interrupt
* @wirq: Wake irq specific data
*
- * Internal function to attach either a device IO interrupt or a
- * dedicated wake-up interrupt as a wake IRQ.
+ * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
*/
-static int dev_pm_attach_wake_irq(struct device *dev, int irq,
- struct wake_irq *wirq)
+static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
unsigned long flags;
@@ -65,7 +62,7 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
wirq->dev = dev;
wirq->irq = irq;
- err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
kfree(wirq);
@@ -196,7 +193,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_name;
- err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
goto err_free_irq;
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index c84be0028f63..26798da661bd 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -129,8 +129,8 @@ static int __init n64cart_probe(struct platform_device *pdev)
}
reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!reg_base)
- return -EINVAL;
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
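
[Editor's note] The n64cart fix reflects the contract of devm_platform_ioremap_resource(): on failure it returns an ERR_PTR()-encoded errno, never NULL, so a NULL check lets errors sail through as a valid-looking pointer. The canonical probe-time check (dev_err_probe() here is an optional variant for nicer diagnostics, not something this patch uses):

	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))	/* never NULL on error */
		return dev_err_probe(&pdev->dev, PTR_ERR(base), "ioremap failed\n");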
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 57c6ae7debd9..9b3bd083b411 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -762,7 +762,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_vblk;
/* Default queue sizing is to fill the ring. */
- if (likely(!virtblk_queue_depth)) {
+ if (!virtblk_queue_depth) {
queue_depth = vblk->vqs[0].vq->num_free;
/* ... but without indirect descs, we use 2 descs per req */
if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
@@ -836,7 +836,7 @@ static int virtblk_probe(struct virtio_device *vdev)
else
blk_size = queue_logical_block_size(q);
- if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
+ if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE) {
dev_err(&vdev->dev,
"block size is changed unexpectedly, now is %u\n",
blk_size);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index bb466981dc1b..6f3272b58ced 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -591,7 +591,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
- dev_warn(smi_info->io.dev,
+ dev_warn_ratelimited(smi_info->io.dev,
"Error clearing flags: %2.2x\n", msg[2]);
}
smi_info->si_state = SI_NORMAL;
@@ -683,10 +683,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- dev_warn(smi_info->io.dev,
- "Couldn't get irq info: %x.\n", msg[2]);
- dev_warn(smi_info->io.dev,
- "Maybe ok, but ipmi might run very slowly.\n");
+ dev_warn_ratelimited(smi_info->io.dev,
+ "Couldn't get irq info: %x,\n"
+ "Maybe ok, but ipmi might run very slowly.\n",
+ msg[2]);
smi_info->si_state = SI_NORMAL;
break;
}
@@ -721,7 +721,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0)
- dev_warn(smi_info->io.dev,
+ dev_warn_ratelimited(smi_info->io.dev,
"Could not set the global enables: 0x%x.\n",
msg[2]);
@@ -1343,7 +1343,7 @@ retry:
if (cc != IPMI_CC_NO_ERROR &&
++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
- dev_warn(smi_info->io.dev,
+ dev_warn_ratelimited(smi_info->io.dev,
"BMC returned 0x%2.2x, retry get bmc device id\n",
cc);
goto retry;
@@ -1605,7 +1605,7 @@ static ssize_t name##_show(struct device *dev, \
\
return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
} \
-static DEVICE_ATTR(name, 0444, name##_show, NULL)
+static DEVICE_ATTR_RO(name)
static ssize_t type_show(struct device *dev,
struct device_attribute *attr,
@@ -1615,7 +1615,7 @@ static ssize_t type_show(struct device *dev,
return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
}
-static DEVICE_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR_RO(type);
static ssize_t interrupts_enabled_show(struct device *dev,
struct device_attribute *attr,
@@ -1626,8 +1626,7 @@ static ssize_t interrupts_enabled_show(struct device *dev,
return snprintf(buf, 10, "%d\n", enabled);
}
-static DEVICE_ATTR(interrupts_enabled, 0444,
- interrupts_enabled_show, NULL);
+static DEVICE_ATTR_RO(interrupts_enabled);
IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
@@ -1658,7 +1657,7 @@ static ssize_t params_show(struct device *dev,
smi_info->io.irq,
smi_info->io.slave_addr);
}
-static DEVICE_ATTR(params, 0444, params_show, NULL);
+static DEVICE_ATTR_RO(params);
static struct attribute *ipmi_si_dev_attrs[] = {
&dev_attr_type.attr,
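
[Editor's note] These DEVICE_ATTR_RO() conversions work because every show routine already follows the <name>_show naming convention the macro expects. Roughly, per include/linux/device.h:

/* simplified expansion, for illustration */
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
/* __ATTR_RO(_name) wires up .mode = 0444 and .show = _name##_show */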
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 053089f83677..3236706771b1 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -176,10 +176,6 @@ static const struct parent_map gcc_parent_map_2[] = {
{ P_GPLL0_OUT_ODD, 2 },
};
-static const struct clk_parent_data gcc_parent_data_2[] = {
- { .fw_name = "bi_tcxo" },
- { .hw = &gpll0_out_odd.clkr.hw },
-};
static const struct clk_parent_data gcc_parent_data_2_ao[] = {
{ .fw_name = "bi_tcxo_ao" },
{ .hw = &gpll0_out_odd.clkr.hw },
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a5c5f70acfc9..954749afb5fe 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -133,6 +133,18 @@ config ARM_MEDIATEK_CPUFREQ
help
This adds the CPUFreq driver support for MediaTek SoCs.
+config ARM_MEDIATEK_CPUFREQ_HW
+ tristate "MediaTek CPUFreq HW driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ default m
+ help
+ Support for the CPUFreq HW driver.
+ Some MediaTek chipsets have a HW engine to offload the steps
+ necessary for changing the frequency of the CPUs. Firmware loaded
+ in this engine exposes a programming interface to the OS.
+ The driver implements the cpufreq interface for this HW engine.
+ Say Y if you want to support CPUFreq HW.
+
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 27d3bd7ea9d4..48ee5859030c 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_IMX_CPUFREQ_DT) += imx-cpufreq-dt.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
+obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ_HW) += mediatek-cpufreq-hw.o
obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index b49612895c78..28467d83c745 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -889,6 +889,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = !acpi_pstate_strict &&
!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
+ if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
+ pr_warn(FW_WARN "P-state 0 is not max freq\n");
+
return result;
err_unreg:
@@ -918,16 +921,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
-{
- struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
- policy->cpu);
- unsigned int freq = policy->freq_table[0].frequency;
-
- if (perf->states[0].core_frequency * 1000 != freq)
- pr_warn(FW_WARN "P-state 0 is not max freq\n");
-}
-
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
@@ -955,7 +948,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
- .ready = acpi_cpufreq_cpu_ready,
.resume = acpi_cpufreq_resume,
.name = "acpi-cpufreq",
.attr = acpi_cpufreq_attr,
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 231e585f6ba2..ca1d103ec449 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -137,11 +137,15 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
{ .compatible = "qcom,qcs404", },
+ { .compatible = "qcom,sa8155p" },
{ .compatible = "qcom,sc7180", },
{ .compatible = "qcom,sc7280", },
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm8150", },
+ { .compatible = "qcom,sm8250", },
+ { .compatible = "qcom,sm8350", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index ece52863ba62..8fcaba541539 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -143,8 +143,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
return 0;
out_clk_put:
@@ -184,6 +182,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.exit = cpufreq_exit,
.online = cpufreq_online,
.offline = cpufreq_offline,
+ .register_em = cpufreq_register_em_with_opp,
.name = "cpufreq-dt",
.attr = cpufreq_dt_attr,
.suspend = cpufreq_generic_suspend,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 06c526d66dd3..5782b15a8caa 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1491,6 +1491,19 @@ static int cpufreq_online(unsigned int cpu)
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_add(&policy->policy_list, &cpufreq_policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	/*
+	 * Register with the energy model before
+	 * sched_cpufreq_governor_change() is called, because that call
+	 * rebuilds the sched domains, which must only happen once the
+	 * energy model has been properly initialized for the policy.
+	 *
+	 * Also, this should be done before the policy is registered
+	 * with the cooling framework.
+	 */
+ if (cpufreq_driver->register_em)
+ cpufreq_driver->register_em(policy);
}
ret = cpufreq_init_policy(policy);
@@ -1504,10 +1517,6 @@ static int cpufreq_online(unsigned int cpu)
kobject_uevent(&policy->kobj, KOBJ_ADD);
- /* Callback for handling stuff after policy is ready */
- if (cpufreq_driver->ready)
- cpufreq_driver->ready(policy);
-
if (cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
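
Most drivers converted in this series point the new ->register_em() callback at cpufreq_register_em_with_opp(). As a point of reference, that helper is a thin wrapper along these lines (mirroring its inline definition in cpufreq.h at the time), so the conversion is equivalent to the dev_pm_opp_of_register_em() calls being removed from the drivers' ->init() paths:

	/* Sketch of the common ->register_em() helper used throughout this series */
	static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
	{
		dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
					  policy->cpus);
	}
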
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 5bf5fc759881..90beb26ed34e 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -192,7 +192,6 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
policy->clk = clks[ARM].clk;
cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = max_freq;
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
}
@@ -204,6 +203,7 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
+ .register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b4ffe6c8a0d0..1097f826ad70 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -32,7 +32,6 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
-#include "../drivers/thermal/intel/thermal_interrupt.h"
#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
@@ -220,7 +219,6 @@ struct global_params {
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
- * @hwp_notify_work: workqueue for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -259,7 +257,6 @@ struct cpudata {
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
- struct delayed_work hwp_notify_work;
};
static struct cpudata **all_cpu_data;
@@ -271,6 +268,7 @@ static struct cpudata **all_cpu_data;
* @get_min: Callback to get minimum P state
* @get_turbo: Callback to get turbo P state
* @get_scaling: Callback to get frequency scaling factor
+ * @get_cpu_scaling: Get frequency scaling factor for a given cpu
* @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
* @get_val: Callback to convert P state to actual MSR write value
* @get_vid: Callback to get VID data for Atom platforms
@@ -284,6 +282,7 @@ struct pstate_funcs {
int (*get_min)(void);
int (*get_turbo)(void);
int (*get_scaling)(void);
+ int (*get_cpu_scaling)(int cpu);
int (*get_aperf_mperf_shift)(void);
u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
@@ -387,6 +386,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
return cppc_perf.nominal_perf;
}
+static u32 intel_pstate_cppc_nominal(int cpu)
+{
+ u64 nominal_perf;
+
+ if (cppc_get_nominal_perf(cpu, &nominal_perf))
+ return 0;
+
+ return nominal_perf;
+}
#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
@@ -473,20 +481,6 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
acpi_processor_unregister_performance(policy->cpu);
}
-
-static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
-{
- return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
-}
-
-static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
- struct cppc_perf_caps *caps)
-{
- if (cppc_get_perf_caps(cpu->cpu, caps))
- return false;
-
- return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
-}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
@@ -509,15 +503,8 @@ static inline int intel_pstate_get_cppc_guaranteed(int cpu)
}
#endif /* CONFIG_ACPI_CPPC_LIB */
-static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
-{
- pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);
-
- cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling;
-}
-
/**
- * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels.
+ * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
* @cpu: Target CPU.
*
* On hybrid processors, HWP may expose more performance levels than there are
@@ -525,115 +512,46 @@ static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
* scaling factor between HWP performance levels and CPU frequency will be less
* than the scaling factor between P-state values and CPU frequency.
*
- * In that case, the scaling factor between HWP performance levels and CPU
- * frequency needs to be determined which can be done with the help of the
- * observation that certain HWP performance levels should correspond to certain
- * P-states, like for example the HWP highest performance should correspond
- * to the maximum turbo P-state of the CPU.
+ * In that case, adjust the CPU parameters used in computations accordingly.
*/
-static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
+static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
{
int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
int perf_ctl_turbo = pstate_funcs.get_turbo();
int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
- int perf_ctl_max = pstate_funcs.get_max();
- int max_freq = perf_ctl_max * perf_ctl_scaling;
- int scaling = INT_MAX;
- int freq;
+ int scaling = cpu->pstate.scaling;
pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
- pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max);
+ pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max());
pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
-
pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
-
-#ifdef CONFIG_ACPI
- if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
- struct cppc_perf_caps caps;
-
- if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
- if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
- pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
-
- /*
- * If the CPPC nominal performance is valid, it
- * can be assumed to correspond to cpu_khz.
- */
- if (caps.nominal_perf == perf_ctl_max_phys) {
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
- } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
- pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
-
- /*
- * If the CPPC guaranteed performance is valid,
- * it can be assumed to correspond to max_freq.
- */
- if (caps.guaranteed_perf == perf_ctl_max) {
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
- }
- }
- }
-#endif
- /*
- * If using the CPPC data to compute the HWP-to-frequency scaling factor
- * doesn't work, use the HWP_CAP gauranteed perf for this purpose with
- * the assumption that it corresponds to max_freq.
- */
- if (scaling > perf_ctl_scaling) {
- pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu);
-
- if (cpu->pstate.max_pstate == perf_ctl_max) {
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate);
- if (scaling > perf_ctl_scaling) {
- /*
- * This should not happen, because it would mean that
- * the number of HWP perf levels was less than the
- * number of P-states, so use the PERF_CTL scaling in
- * that case.
- */
- pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu,
- scaling);
-
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- }
+ pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
/*
- * If the product of the HWP performance scaling factor obtained above
- * and the HWP_CAP highest performance is greater than the maximum turbo
- * frequency corresponding to the pstate_funcs.get_turbo() return value,
- * the scaling factor is too high, so recompute it so that the HWP_CAP
- * highest performance corresponds to the maximum turbo frequency.
+ * If the product of the HWP performance scaling factor and the HWP_CAP
+ * highest performance is greater than the maximum turbo frequency
+ * corresponding to the pstate_funcs.get_turbo() return value, the
+ * scaling factor is too high, so recompute it to make the HWP_CAP
+ * highest performance correspond to the maximum turbo frequency.
*/
if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
- pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling);
-
cpu->pstate.turbo_freq = turbo_freq;
scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
- }
-
- cpu->pstate.scaling = scaling;
+ cpu->pstate.scaling = scaling;
- pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+ pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n",
+ cpu->cpu, scaling);
+ }
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
perf_ctl_scaling);
- freq = perf_ctl_max_phys * perf_ctl_scaling;
- cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling);
+ cpu->pstate.max_pstate_physical =
+ DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
+ scaling);
cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
/*
@@ -1628,40 +1546,6 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
/************************** sysfs end ************************/
-static void intel_pstate_notify_work(struct work_struct *work)
-{
- mutex_lock(&intel_pstate_driver_lock);
- cpufreq_update_policy(smp_processor_id());
- wrmsrl(MSR_HWP_STATUS, 0);
- mutex_unlock(&intel_pstate_driver_lock);
-}
-
-void notify_hwp_interrupt(void)
-{
- unsigned int this_cpu = smp_processor_id();
- struct cpudata *cpudata;
- u64 value;
-
- if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
- return;
-
- rdmsrl(MSR_HWP_STATUS, value);
- if (!(value & 0x01))
- return;
-
- cpudata = all_cpu_data[this_cpu];
- schedule_delayed_work_on(this_cpu, &cpudata->hwp_notify_work, msecs_to_jiffies(10));
-}
-
-static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
-{
- /* Enable HWP notification interrupt for guaranteed performance change */
- if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
- INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
- }
-}
-
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt as we don't process them */
@@ -1671,8 +1555,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
if (cpudata->epp_default == -EINVAL)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
-
- intel_pstate_enable_hwp_interrupt(cpudata);
}
static int atom_get_min_pstate(void)
@@ -1900,6 +1782,38 @@ static int knl_get_turbo_pstate(void)
return ret;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u32 hybrid_ref_perf;
+
+static int hybrid_get_cpu_scaling(int cpu)
+{
+ return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf,
+ intel_pstate_cppc_nominal(cpu));
+}
+
+static void intel_pstate_cppc_set_cpu_scaling(void)
+{
+ u32 min_nominal_perf = U32_MAX;
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ u32 nominal_perf = intel_pstate_cppc_nominal(cpu);
+
+ if (nominal_perf && nominal_perf < min_nominal_perf)
+ min_nominal_perf = nominal_perf;
+ }
+
+ if (min_nominal_perf < U32_MAX) {
+ hybrid_ref_perf = min_nominal_perf;
+ pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
+ }
+}
+#else
+static inline void intel_pstate_cppc_set_cpu_scaling(void)
+{
+}
+#endif /* CONFIG_ACPI_CPPC_LIB */
+
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
@@ -1928,10 +1842,8 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
- bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU);
int perf_ctl_max_phys = pstate_funcs.get_max_physical();
- int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys :
- pstate_funcs.get_scaling();
+ int perf_ctl_scaling = pstate_funcs.get_scaling();
cpu->pstate.min_pstate = pstate_funcs.get_min();
cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
@@ -1940,10 +1852,13 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (hwp_active && !hwp_mode_bdw) {
__intel_pstate_get_hwp_cap(cpu);
- if (hybrid_cpu)
- intel_pstate_hybrid_hwp_calibrate(cpu);
- else
+ if (pstate_funcs.get_cpu_scaling) {
+ cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
+ if (cpu->pstate.scaling != perf_ctl_scaling)
+ intel_pstate_hybrid_hwp_adjust(cpu);
+ } else {
cpu->pstate.scaling = perf_ctl_scaling;
+ }
} else {
cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max();
@@ -3315,6 +3230,9 @@ static int __init intel_pstate_init(void)
if (!default_driver)
default_driver = &intel_pstate;
+ if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
+ intel_pstate_cppc_set_cpu_scaling();
+
goto hwp_cpu_matched;
}
} else {
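
To make the hybrid scaling concrete, a worked sketch with made-up numbers: hybrid_get_cpu_scaling() scales the reference (smallest) CPPC nominal performance by the core frequency step and divides by the target CPU's nominal performance, and intel_pstate_hybrid_hwp_adjust() then re-derives the frequency limits from that factor.

	/* Illustrative arithmetic only; all values below are made up. */
	static int example_hybrid_scaling(void)
	{
		u32 core_scaling = 100000;	/* kHz per PERF_CTL P-state step */
		u32 hybrid_ref_perf = 24;	/* smallest CPPC nominal_perf found */
		u32 nominal_perf = 30;		/* this CPU's CPPC nominal_perf */

		/*
		 * Same formula as hybrid_get_cpu_scaling() above:
		 * DIV_ROUND_UP(100000 * 24, 30) = 80000 kHz per HWP level.
		 */
		return DIV_ROUND_UP(core_scaling * hybrid_ref_perf, nominal_perf);
	}

If the resulting factor times HWP_CAP.highest overshoots the PERF_CTL turbo frequency, intel_pstate_hybrid_hwp_adjust() refines it as DIV_ROUND_UP(turbo_freq, turbo_pstate), pinning the highest HWP level to the turbo frequency.
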
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
new file mode 100644
index 000000000000..0cf18dd46b92
--- /dev/null
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/energy_model.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#define LUT_MAX_ENTRIES 32U
+#define LUT_FREQ GENMASK(11, 0)
+#define LUT_ROW_SIZE 0x4
+#define CPUFREQ_HW_STATUS BIT(0)
+#define SVS_HW_STATUS BIT(1)
+#define POLL_USEC 1000
+#define TIMEOUT_USEC 300000
+
+enum {
+ REG_FREQ_LUT_TABLE,
+ REG_FREQ_ENABLE,
+ REG_FREQ_PERF_STATE,
+ REG_FREQ_HW_STATE,
+ REG_EM_POWER_TBL,
+ REG_FREQ_LATENCY,
+
+ REG_ARRAY_SIZE,
+};
+
+struct mtk_cpufreq_data {
+ struct cpufreq_frequency_table *table;
+ void __iomem *reg_bases[REG_ARRAY_SIZE];
+ int nr_opp;
+};
+
+static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+};
+
+static int __maybe_unused
+mtk_cpufreq_get_cpu_power(unsigned long *mW,
+ unsigned long *KHz, struct device *cpu_dev)
+{
+ struct mtk_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+ int i;
+
+ policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ for (i = 0; i < data->nr_opp; i++) {
+ if (data->table[i].frequency < *KHz)
+ break;
+ }
+ i--;
+
+ *KHz = data->table[i].frequency;
+ *mW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] +
+ i * LUT_ROW_SIZE) / 1000;
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+
+ return 0;
+}
+
+static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
+{
+ struct mtk_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+ unsigned int index;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ index = readl_relaxed(data->reg_bases[REG_FREQ_PERF_STATE]);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return data->table[index].frequency;
+}
+
+static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+ unsigned int index;
+
+ index = cpufreq_table_find_index_dl(policy, target_freq);
+
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+
+ return policy->freq_table[index].frequency;
+}
+
+static int mtk_cpu_create_freq_table(struct platform_device *pdev,
+ struct mtk_cpufreq_data *data)
+{
+ struct device *dev = &pdev->dev;
+ u32 temp, i, freq, prev_freq = 0;
+ void __iomem *base_table;
+
+ data->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1,
+ sizeof(*data->table), GFP_KERNEL);
+ if (!data->table)
+ return -ENOMEM;
+
+ base_table = data->reg_bases[REG_FREQ_LUT_TABLE];
+
+ for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+ temp = readl_relaxed(base_table + (i * LUT_ROW_SIZE));
+ freq = FIELD_GET(LUT_FREQ, temp) * 1000;
+
+ if (freq == prev_freq)
+ break;
+
+ data->table[i].frequency = freq;
+
+ dev_dbg(dev, "index=%d freq=%d\n", i, data->table[i].frequency);
+
+ prev_freq = freq;
+ }
+
+ data->table[i].frequency = CPUFREQ_TABLE_END;
+ data->nr_opp = i;
+
+ return 0;
+}
+
+static int mtk_cpu_resources_init(struct platform_device *pdev,
+ struct cpufreq_policy *policy,
+ const u16 *offsets)
+{
+ struct mtk_cpufreq_data *data;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ int ret, i;
+ int index;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus);
+ if (index < 0)
+ return index;
+
+ base = devm_platform_ioremap_resource(pdev, index);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
+ data->reg_bases[i] = base + offsets[i];
+
+ ret = mtk_cpu_create_freq_table(pdev, data);
+ if (ret) {
+ dev_info(dev, "Domain-%d failed to create freq table\n", index);
+ return ret;
+ }
+
+ policy->freq_table = data->table;
+ policy->driver_data = data;
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+{
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
+ struct mtk_cpufreq_data *data;
+ unsigned int latency;
+ int ret;
+
+ /* Get the bases of cpufreq for domains */
+ ret = mtk_cpu_resources_init(pdev, policy, platform_get_drvdata(pdev));
+ if (ret) {
+ dev_info(&pdev->dev, "CPUFreq resource init failed\n");
+ return ret;
+ }
+
+ data = policy->driver_data;
+
+ latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+ policy->fast_switch_possible = true;
+
+	/* The HW must be in the enabled state before we can proceed */
+ writel_relaxed(0x1, data->reg_bases[REG_FREQ_ENABLE]);
+ if (readl_poll_timeout(data->reg_bases[REG_FREQ_HW_STATE], sig,
+ (sig & pwr_hw) == pwr_hw, POLL_USEC,
+ TIMEOUT_USEC)) {
+ if (!(sig & CPUFREQ_HW_STATUS)) {
+ pr_info("cpufreq hardware of CPU%d is not enabled\n",
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ pr_info("SVS of CPU%d is not enabled\n", policy->cpu);
+ }
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+	/* Put the HW into the paused state */
+ writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
+
+ return 0;
+}
+
+static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
+ &em_cb, policy->cpus, true);
+}
+
+static struct cpufreq_driver cpufreq_mtk_hw_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = mtk_cpufreq_hw_target_index,
+ .get = mtk_cpufreq_hw_get,
+ .init = mtk_cpufreq_hw_cpu_init,
+ .exit = mtk_cpufreq_hw_cpu_exit,
+ .register_em = mtk_cpufreq_register_em,
+ .fast_switch = mtk_cpufreq_hw_fast_switch,
+ .name = "mtk-cpufreq-hw",
+ .attr = cpufreq_generic_attr,
+};
+
+static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
+{
+ const void *data;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, (void *) data);
+ cpufreq_mtk_hw_driver.driver_data = pdev;
+
+ ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
+ if (ret)
+ dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
+
+ return ret;
+}
+
+static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
+{
+ return cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+}
+
+static const struct of_device_id mtk_cpufreq_hw_match[] = {
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ {}
+};
+
+static struct platform_driver mtk_cpufreq_hw_driver = {
+ .probe = mtk_cpufreq_hw_driver_probe,
+ .remove = mtk_cpufreq_hw_driver_remove,
+ .driver = {
+ .name = "mtk-cpufreq-hw",
+ .of_match_table = mtk_cpufreq_hw_match,
+ },
+};
+module_platform_driver(mtk_cpufreq_hw_driver);
+
+MODULE_AUTHOR("Hector Yuan <hector.yuan@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek cpufreq-hw driver");
+MODULE_LICENSE("GPL v2");
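
For orientation in the new driver: each 32-bit LUT row encodes the frequency in its low 12 bits (LUT_FREQ); given the *1000 conversion to kHz in mtk_cpu_create_freq_table(), the raw field is evidently in MHz, and the per-entry EM power values read from REG_EM_POWER_TBL are divided by 1000, suggesting they are stored in microwatts. A minimal decode sketch (the raw register value is made up):

	/* Sketch: decoding one LUT row the way mtk_cpu_create_freq_table() does */
	u32 raw = 0x000007d0;	/* made-up register value; low 12 bits = 2000 */
	unsigned int freq_khz = FIELD_GET(GENMASK(11, 0), raw) * 1000;
	/* freq_khz == 2000000, i.e. 2.0 GHz */
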
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 87019d5a9547..866163883b48 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -448,8 +448,6 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = info;
policy->clk = info->cpu_clk;
- dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus);
-
return 0;
}
@@ -471,6 +469,7 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
.get = cpufreq_generic_get,
.init = mtk_cpufreq_init,
.exit = mtk_cpufreq_exit,
+ .register_em = cpufreq_register_em_with_opp,
.name = "mtk-cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index e035ee216b0f..1b50df06c6bc 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -131,7 +131,6 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
cpufreq_generic_init(policy, freq_table, 300 * 1000);
- dev_pm_opp_of_register_em(mpu_dev, policy->cpus);
return 0;
}
@@ -150,6 +149,7 @@ static struct cpufreq_driver omap_driver = {
.get = cpufreq_generic_get,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
+ .register_em = cpufreq_register_em_with_opp,
.name = "omap",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index f86859bf76f1..a2be0df7e174 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -7,12 +7,14 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
@@ -22,10 +24,13 @@
#define CLK_HW_DIV 2
#define LUT_TURBO_IND 1
+#define HZ_PER_KHZ 1000
+
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_freq_lut;
u32 reg_volt_lut;
+ u32 reg_current_vote;
u32 reg_perf_state;
u8 lut_row_size;
};
@@ -34,6 +39,16 @@ struct qcom_cpufreq_data {
void __iomem *base;
struct resource *res;
const struct qcom_cpufreq_soc_data *soc_data;
+
+ /*
+ * Mutex to synchronize between de-init sequence and re-starting LMh
+ * polling/interrupts
+ */
+ struct mutex throttle_lock;
+ int throttle_irq;
+ bool cancel_throttle;
+ struct delayed_work throttle_work;
+ struct cpufreq_policy *policy;
};
static unsigned long cpu_hw_rate, xo_rate;
@@ -251,10 +266,92 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
+static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+{
+ unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
+
+ return (val & 0x3FF) * 19200;
+}
+
+static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+{
+ unsigned long max_capacity, capacity, freq_hz, throttled_freq;
+ struct cpufreq_policy *policy = data->policy;
+ int cpu = cpumask_first(policy->cpus);
+ struct device *dev = get_cpu_device(cpu);
+ struct dev_pm_opp *opp;
+ unsigned int freq;
+
+ /*
+ * Get the h/w throttled frequency, normalize it using the
+ * registered opp table and use it to calculate thermal pressure.
+ */
+ freq = qcom_lmh_get_throttle_freq(data);
+ freq_hz = freq * HZ_PER_KHZ;
+
+ opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
+ if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
+ dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+
+ throttled_freq = freq_hz / HZ_PER_KHZ;
+
+ /* Update thermal pressure */
+
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq);
+
+ /* Don't pass boost capacity to scheduler */
+ if (capacity > max_capacity)
+ capacity = max_capacity;
+
+ arch_set_thermal_pressure(policy->cpus, max_capacity - capacity);
+
+	/*
+	 * In the unlikely case the policy was unregistered, do not
+	 * re-enable polling or the h/w interrupt.
+	 */
+ mutex_lock(&data->throttle_lock);
+ if (data->cancel_throttle)
+ goto out;
+
+	/*
+	 * If the h/w throttled frequency is higher than what cpufreq
+	 * has requested, stop polling and switch back to the interrupt
+	 * mechanism.
+	 */
+ if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
+ enable_irq(data->throttle_irq);
+ else
+ mod_delayed_work(system_highpri_wq, &data->throttle_work,
+ msecs_to_jiffies(10));
+
+out:
+ mutex_unlock(&data->throttle_lock);
+}
+
+static void qcom_lmh_dcvs_poll(struct work_struct *work)
+{
+ struct qcom_cpufreq_data *data;
+
+ data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
+ qcom_lmh_dcvs_notify(data);
+}
+
+static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
+{
+ struct qcom_cpufreq_data *c_data = data;
+
+ /* Disable interrupt and enable polling */
+ disable_irq_nosync(c_data->throttle_irq);
+ qcom_lmh_dcvs_notify(c_data);
+
+	return IRQ_HANDLED;
+}
+
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
.reg_enable = 0x0,
.reg_freq_lut = 0x110,
.reg_volt_lut = 0x114,
+ .reg_current_vote = 0x704,
.reg_perf_state = 0x920,
.lut_row_size = 32,
};
@@ -274,6 +371,51 @@ static const struct of_device_id qcom_cpufreq_hw_match[] = {
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
+static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ char irq_name[15];
+ int ret;
+
+ /*
+ * Look for LMh interrupt. If no interrupt line is specified /
+ * if there is an error, allow cpufreq to be enabled as usual.
+ */
+ data->throttle_irq = platform_get_irq(pdev, index);
+ if (data->throttle_irq <= 0)
+ return data->throttle_irq == -EPROBE_DEFER ? -EPROBE_DEFER : 0;
+
+ data->cancel_throttle = false;
+ data->policy = policy;
+
+ mutex_init(&data->throttle_lock);
+ INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
+
+ snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", policy->cpu);
+ ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
+ IRQF_ONESHOT, irq_name, data);
+ if (ret) {
+ dev_err(&pdev->dev, "Error registering %s: %d\n", irq_name, ret);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+{
+ if (data->throttle_irq <= 0)
+ return;
+
+ mutex_lock(&data->throttle_lock);
+ data->cancel_throttle = true;
+ mutex_unlock(&data->throttle_lock);
+
+ cancel_delayed_work_sync(&data->throttle_work);
+ free_irq(data->throttle_irq, data);
+}
+
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
@@ -348,6 +490,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
}
policy->driver_data = data;
+ policy->dvfs_possible_from_any_cpu = true;
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
@@ -362,14 +505,16 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
goto error;
}
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
if (policy_has_boost_freq(policy)) {
ret = cpufreq_enable_boost_support();
if (ret)
dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
}
+ ret = qcom_cpufreq_hw_lmh_init(policy, index);
+ if (ret)
+ goto error;
+
return 0;
error:
kfree(data);
@@ -389,6 +534,7 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ qcom_cpufreq_hw_lmh_exit(data);
kfree(policy->freq_table);
kfree(data);
iounmap(base);
@@ -412,6 +558,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.get = qcom_cpufreq_hw_get,
.init = qcom_cpufreq_hw_cpu_init,
.exit = qcom_cpufreq_hw_cpu_exit,
+ .register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
.attr = qcom_cpufreq_hw_attr,
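
Two conversions in the LMh path above are worth spelling out: the vote register holds a 10-bit value in units of 19200 kHz (hence (val & 0x3FF) * 19200), and the throttled frequency is turned into thermal pressure by reporting the capacity lost to throttling. A worked sketch with made-up numbers:

	/* Illustrative only; all values below are made up. */
	u32 vote = 0x9c;					/* 10-bit LMh vote */
	unsigned int throttled_khz = (vote & 0x3FF) * 19200;	/* 156 * 19200 = 2995200 kHz */

	unsigned long max_capacity = 1024;		/* arch_scale_cpu_capacity() */
	unsigned long max_khz = 3200000;		/* policy->cpuinfo.max_freq */
	unsigned long capacity = mult_frac(max_capacity, throttled_khz, max_khz);
	/* capacity == 958 (clamped to max_capacity in the driver);
	 * thermal pressure passed to the scheduler: 1024 - 958 = 66
	 */
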
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 75f818d04b48..1e0cd4d165f0 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -22,7 +22,9 @@
struct scmi_data {
int domain_id;
+ int nr_opp;
struct device *cpu_dev;
+ cpumask_var_t opp_shared_cpus;
};
static struct scmi_protocol_handle *ph;
@@ -123,9 +125,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
- struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
- cpumask_var_t opp_shared_cpus;
- bool power_scale_mw;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -133,9 +132,15 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
+ if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
@@ -148,14 +153,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
* The OPP 'sharing cpus' info may come from DT through an empty opp
* table and opp-shared.
*/
- ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
- if (ret || !cpumask_weight(opp_shared_cpus)) {
+ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
+ if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
/*
* Either opp-table is not set or no opp-shared was found.
* Use the CPU mask from SCMI to designate CPUs sharing an OPP
* table.
*/
- cpumask_copy(opp_shared_cpus, policy->cpus);
+ cpumask_copy(priv->opp_shared_cpus, policy->cpus);
}
/*
@@ -180,7 +185,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
@@ -188,21 +193,13 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- power_scale_mw = perf_ops->power_scale_mw_get(ph);
- em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
- opp_shared_cpus, power_scale_mw);
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto out_free_opp;
+ priv->nr_opp = nr_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
- goto out_free_priv;
+ goto out_free_opp;
}
priv->cpu_dev = cpu_dev;
@@ -223,17 +220,16 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, cpu_dev);
- free_cpumask_var(opp_shared_cpus);
return 0;
-out_free_priv:
- kfree(priv);
-
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
out_free_cpumask:
- free_cpumask_var(opp_shared_cpus);
+ free_cpumask_var(priv->opp_shared_cpus);
+
+out_free_priv:
+ kfree(priv);
return ret;
}
@@ -244,11 +240,33 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ free_cpumask_var(priv->opp_shared_cpus);
kfree(priv);
return 0;
}
+static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+ bool power_scale_mw = perf_ops->power_scale_mw_get(ph);
+ struct scmi_data *priv = policy->driver_data;
+
+ /*
+ * This callback will be called for each policy, but we don't need to
+ * register with EM every time. Despite not being part of the same
+ * policy, some CPUs may still share their perf-domains, and a CPU from
+ * another policy may already have registered with EM on behalf of CPUs
+ * of this policy.
+ */
+ if (!priv->nr_opp)
+ return;
+
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
+ &em_cb, priv->opp_shared_cpus,
+ power_scale_mw);
+}
+
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -261,6 +279,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
+ .register_em = scmi_cpufreq_register_em,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index d6a698a1b5d1..bda3e7d42964 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -163,8 +163,6 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = false;
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
return 0;
out_free_cpufreq_table:
@@ -200,6 +198,7 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
.target_index = scpi_cpufreq_set_target,
+ .register_em = cpufreq_register_em_with_opp,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 1a251e635ebd..b8704232c27b 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -145,16 +145,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static void sh_cpufreq_cpu_ready(struct cpufreq_policy *policy)
-{
- struct device *dev = get_cpu_device(policy->cpu);
-
- dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
- "Maximum %u.%03u MHz.\n",
- policy->min / 1000, policy->min % 1000,
- policy->max / 1000, policy->max % 1000);
-}
-
static struct cpufreq_driver sh_cpufreq_driver = {
.name = "sh",
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
@@ -163,7 +153,6 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .ready = sh_cpufreq_cpu_ready,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 51dfa9ae6cf5..d295f405c4bb 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -15,7 +15,6 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -47,7 +46,6 @@ static bool bL_switching_enabled;
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
-static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
@@ -442,8 +440,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
policy->freq_table = freq_table[cur_cluster];
policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) =
clk_get_cpu_rate(policy->cpu);
@@ -455,12 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev;
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- if (cur_cluster < MAX_CLUSTERS) {
- cpufreq_cooling_unregister(cdev[cur_cluster]);
- cdev[cur_cluster] = NULL;
- }
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -473,17 +463,6 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
-static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
-{
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- /* Do not register a cpu_cooling device if we are in IKS mode */
- if (cur_cluster >= MAX_CLUSTERS)
- return;
-
- cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver ve_spc_cpufreq_driver = {
.name = "vexpress-spc",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -493,7 +472,7 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
.get = ve_spc_cpufreq_get_rate,
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
- .ready = ve_spc_cpufreq_ready,
+ .register_em = cpufreq_register_em_with_opp,
.attr = cpufreq_generic_attr,
};
@@ -553,6 +532,9 @@ static int ve_spc_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < MAX_CLUSTERS; i++)
mutex_init(&cluster_lock[i]);
+ if (!is_bL_switching_enabled())
+ ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV;
+
ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 96bc7b5c6532..6c61817996a3 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -306,9 +306,7 @@ static int nitrox_device_flr(struct pci_dev *pdev)
return -ENOMEM;
}
- /* check flr support */
- if (pcie_has_flr(pdev))
- pcie_flr(pdev);
+ pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
pci_restore_state(pdev);
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index 32954059b37b..d1aaabc940f3 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -1,11 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CXL_BUS) += cxl_core.o
+obj-$(CONFIG_CXL_BUS) += core/
obj-$(CONFIG_CXL_MEM) += cxl_pci.o
obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL
-cxl_core-y := core.o
cxl_pci-y := pci.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 8ae89273f58e..54e9d4d2cf5f 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -243,6 +243,9 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev)
{
struct acpi_device *adev = to_acpi_device(dev);
+ if (!acpi_pci_find_root(adev->handle))
+ return NULL;
+
if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
return adev;
return NULL;
@@ -266,10 +269,6 @@ static int add_host_bridge_uport(struct device *match, void *arg)
if (!bridge)
return 0;
- pci_root = acpi_pci_find_root(bridge->handle);
- if (!pci_root)
- return -ENXIO;
-
dport = find_dport_by_dev(root_port, match);
if (!dport) {
dev_dbg(host, "host bridge expected and not found\n");
@@ -282,6 +281,11 @@ static int add_host_bridge_uport(struct device *match, void *arg)
return PTR_ERR(port);
dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
+ /*
+ * Note that this lookup already succeeded in
+ * to_cxl_host_bridge(), so no need to check for failure here
+ */
+ pci_root = acpi_pci_find_root(bridge->handle);
ctx = (struct cxl_walk_context){
.dev = host,
.root = pci_root->bus,
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
new file mode 100644
index 000000000000..0fdbf3c6ac1a
--- /dev/null
+++ b/drivers/cxl/core/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CXL_BUS) += cxl_core.o
+
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl
+cxl_core-y := bus.o
+cxl_core-y += pmem.o
+cxl_core-y += regs.o
+cxl_core-y += memdev.o
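
Because the core objects are compiled with DEFAULT_SYMBOL_NAMESPACE=CXL, every EXPORT_SYMBOL_GPL() in drivers/cxl/core/ lands in the CXL symbol namespace, and any module consuming those exports has to import it explicitly or modpost will complain:

	/* Required in any module that links against CXL core exports */
	MODULE_IMPORT_NS(CXL);
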
diff --git a/drivers/cxl/core.c b/drivers/cxl/core/bus.c
index 2b90b7c3b9d7..267d8042bec2 100644
--- a/drivers/cxl/core.c
+++ b/drivers/cxl/core/bus.c
@@ -6,14 +6,22 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
-#include "cxl.h"
-#include "mem.h"
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
/**
* DOC: cxl core
*
- * The CXL core provides a sysfs hierarchy for control devices and a rendezvous
- * point for cross-device interleave coordination through cxl ports.
+ * The CXL core provides a set of interfaces that can be consumed by CXL-aware
+ * drivers. The interfaces allow for creation, modification, and destruction of
+ * regions, memory devices, ports, and decoders. CXL-aware drivers must register
+ * with the CXL core via these interfaces in order to participate in
+ * cross-device interleave coordination. The CXL core also establishes and
+ * maintains the bridge to the nvdimm subsystem.
+ *
+ * In addition, the CXL core introduces a sysfs hierarchy to control the
+ * devices that it instantiates.
*/
static DEFINE_IDA(cxl_port_ida);
@@ -30,7 +38,7 @@ static struct attribute *cxl_base_attributes[] = {
NULL,
};
-static struct attribute_group cxl_base_attribute_group = {
+struct attribute_group cxl_base_attribute_group = {
.attrs = cxl_base_attributes,
};
@@ -507,11 +515,6 @@ err:
return ERR_PTR(rc);
}
-static void unregister_dev(void *dev)
-{
- device_unregister(dev);
-}
-
struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
resource_size_t base, resource_size_t len,
@@ -536,7 +539,7 @@ devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
if (rc)
goto err;
- rc = devm_add_action_or_reset(host, unregister_dev, dev);
+ rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
if (rc)
return ERR_PTR(rc);
return cxld;
@@ -548,429 +551,6 @@ err:
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
/**
- * cxl_probe_component_regs() - Detect CXL Component register blocks
- * @dev: Host device of the @base mapping
- * @base: Mapping containing the HDM Decoder Capability Header
- * @map: Map object describing the register block information found
- *
- * See CXL 2.0 8.2.4 Component Register Layout and Definition
- * See CXL 2.0 8.2.5.5 CXL Device Register Interface
- *
- * Probe for component register information and return it in map object.
- */
-void cxl_probe_component_regs(struct device *dev, void __iomem *base,
- struct cxl_component_reg_map *map)
-{
- int cap, cap_count;
- u64 cap_array;
-
- *map = (struct cxl_component_reg_map) { 0 };
-
- /*
- * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
- * CXL 2.0 8.2.4 Table 141.
- */
- base += CXL_CM_OFFSET;
-
- cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
-
- if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
- dev_err(dev,
- "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
- return;
- }
-
- /* It's assumed that future versions will be backward compatible */
- cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
-
- for (cap = 1; cap <= cap_count; cap++) {
- void __iomem *register_block;
- u32 hdr;
- int decoder_cnt;
- u16 cap_id, offset;
- u32 length;
-
- hdr = readl(base + cap * 0x4);
-
- cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
- offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
- register_block = base + offset;
-
- switch (cap_id) {
- case CXL_CM_CAP_CAP_ID_HDM:
- dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
- offset);
-
- hdr = readl(register_block);
-
- decoder_cnt = cxl_hdm_decoder_count(hdr);
- length = 0x20 * decoder_cnt + 0x10;
-
- map->hdm_decoder.valid = true;
- map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
- map->hdm_decoder.size = length;
- break;
- default:
- dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
- offset);
- break;
- }
- }
-}
-EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
-
-static void cxl_nvdimm_bridge_release(struct device *dev)
-{
- struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
-
- kfree(cxl_nvb);
-}
-
-static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
- &cxl_base_attribute_group,
- NULL,
-};
-
-static const struct device_type cxl_nvdimm_bridge_type = {
- .name = "cxl_nvdimm_bridge",
- .release = cxl_nvdimm_bridge_release,
- .groups = cxl_nvdimm_bridge_attribute_groups,
-};
-
-struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
-{
- if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
- "not a cxl_nvdimm_bridge device\n"))
- return NULL;
- return container_of(dev, struct cxl_nvdimm_bridge, dev);
-}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
-
-static struct cxl_nvdimm_bridge *
-cxl_nvdimm_bridge_alloc(struct cxl_port *port)
-{
- struct cxl_nvdimm_bridge *cxl_nvb;
- struct device *dev;
-
- cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
- if (!cxl_nvb)
- return ERR_PTR(-ENOMEM);
-
- dev = &cxl_nvb->dev;
- cxl_nvb->port = port;
- cxl_nvb->state = CXL_NVB_NEW;
- device_initialize(dev);
- device_set_pm_not_required(dev);
- dev->parent = &port->dev;
- dev->bus = &cxl_bus_type;
- dev->type = &cxl_nvdimm_bridge_type;
-
- return cxl_nvb;
-}
-
-static void unregister_nvb(void *_cxl_nvb)
-{
- struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
- bool flush;
-
- /*
- * If the bridge was ever activated then there might be in-flight state
- * work to flush. Once the state has been changed to 'dead' then no new
- * work can be queued by user-triggered bind.
- */
- device_lock(&cxl_nvb->dev);
- flush = cxl_nvb->state != CXL_NVB_NEW;
- cxl_nvb->state = CXL_NVB_DEAD;
- device_unlock(&cxl_nvb->dev);
-
- /*
- * Even though the device core will trigger device_release_driver()
- * before the unregister, it does not know about the fact that
- * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
- * release not and flush it before tearing down the nvdimm device
- * hierarchy.
- */
- device_release_driver(&cxl_nvb->dev);
- if (flush)
- flush_work(&cxl_nvb->state_work);
- device_unregister(&cxl_nvb->dev);
-}
-
-struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
- struct cxl_port *port)
-{
- struct cxl_nvdimm_bridge *cxl_nvb;
- struct device *dev;
- int rc;
-
- if (!IS_ENABLED(CONFIG_CXL_PMEM))
- return ERR_PTR(-ENXIO);
-
- cxl_nvb = cxl_nvdimm_bridge_alloc(port);
- if (IS_ERR(cxl_nvb))
- return cxl_nvb;
-
- dev = &cxl_nvb->dev;
- rc = dev_set_name(dev, "nvdimm-bridge");
- if (rc)
- goto err;
-
- rc = device_add(dev);
- if (rc)
- goto err;
-
- rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
- if (rc)
- return ERR_PTR(rc);
-
- return cxl_nvb;
-
-err:
- put_device(dev);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
-
-static void cxl_nvdimm_release(struct device *dev)
-{
- struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
-
- kfree(cxl_nvd);
-}
-
-static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
- &cxl_base_attribute_group,
- NULL,
-};
-
-static const struct device_type cxl_nvdimm_type = {
- .name = "cxl_nvdimm",
- .release = cxl_nvdimm_release,
- .groups = cxl_nvdimm_attribute_groups,
-};
-
-bool is_cxl_nvdimm(struct device *dev)
-{
- return dev->type == &cxl_nvdimm_type;
-}
-EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
-
-struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
-{
- if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
- "not a cxl_nvdimm device\n"))
- return NULL;
- return container_of(dev, struct cxl_nvdimm, dev);
-}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
-
-static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
-{
- struct cxl_nvdimm *cxl_nvd;
- struct device *dev;
-
- cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
- if (!cxl_nvd)
- return ERR_PTR(-ENOMEM);
-
- dev = &cxl_nvd->dev;
- cxl_nvd->cxlmd = cxlmd;
- device_initialize(dev);
- device_set_pm_not_required(dev);
- dev->parent = &cxlmd->dev;
- dev->bus = &cxl_bus_type;
- dev->type = &cxl_nvdimm_type;
-
- return cxl_nvd;
-}
-
-int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
-{
- struct cxl_nvdimm *cxl_nvd;
- struct device *dev;
- int rc;
-
- cxl_nvd = cxl_nvdimm_alloc(cxlmd);
- if (IS_ERR(cxl_nvd))
- return PTR_ERR(cxl_nvd);
-
- dev = &cxl_nvd->dev;
- rc = dev_set_name(dev, "pmem%d", cxlmd->id);
- if (rc)
- goto err;
-
- rc = device_add(dev);
- if (rc)
- goto err;
-
- dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
- dev_name(dev));
-
- return devm_add_action_or_reset(host, unregister_dev, dev);
-
-err:
- put_device(dev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
-
-/**
- * cxl_probe_device_regs() - Detect CXL Device register blocks
- * @dev: Host device of the @base mapping
- * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
- * @map: Map object describing the register block information found
- *
- * Probe for device register information and return it in map object.
- */
-void cxl_probe_device_regs(struct device *dev, void __iomem *base,
- struct cxl_device_reg_map *map)
-{
- int cap, cap_count;
- u64 cap_array;
-
- *map = (struct cxl_device_reg_map){ 0 };
-
- cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
- if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
- CXLDEV_CAP_ARRAY_CAP_ID)
- return;
-
- cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
-
- for (cap = 1; cap <= cap_count; cap++) {
- u32 offset, length;
- u16 cap_id;
-
- cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
- readl(base + cap * 0x10));
- offset = readl(base + cap * 0x10 + 0x4);
- length = readl(base + cap * 0x10 + 0x8);
-
- switch (cap_id) {
- case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
- dev_dbg(dev, "found Status capability (0x%x)\n", offset);
-
- map->status.valid = true;
- map->status.offset = offset;
- map->status.size = length;
- break;
- case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
- dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
- map->mbox.valid = true;
- map->mbox.offset = offset;
- map->mbox.size = length;
- break;
- case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
- dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
- break;
- case CXLDEV_CAP_CAP_ID_MEMDEV:
- dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
- map->memdev.valid = true;
- map->memdev.offset = offset;
- map->memdev.size = length;
- break;
- default:
- if (cap_id >= 0x8000)
- dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
- else
- dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
- break;
- }
- }
-}
-EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
-
-static void __iomem *devm_cxl_iomap_block(struct device *dev,
- resource_size_t addr,
- resource_size_t length)
-{
- void __iomem *ret_val;
- struct resource *res;
-
- res = devm_request_mem_region(dev, addr, length, dev_name(dev));
- if (!res) {
- resource_size_t end = addr + length - 1;
-
- dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
- return NULL;
- }
-
- ret_val = devm_ioremap(dev, addr, length);
- if (!ret_val)
- dev_err(dev, "Failed to map region %pr\n", res);
-
- return ret_val;
-}
-
-int cxl_map_component_regs(struct pci_dev *pdev,
- struct cxl_component_regs *regs,
- struct cxl_register_map *map)
-{
- struct device *dev = &pdev->dev;
- resource_size_t phys_addr;
- resource_size_t length;
-
- phys_addr = pci_resource_start(pdev, map->barno);
- phys_addr += map->block_offset;
-
- phys_addr += map->component_map.hdm_decoder.offset;
- length = map->component_map.hdm_decoder.size;
- regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
- if (!regs->hdm_decoder)
- return -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_map_component_regs);
-
-int cxl_map_device_regs(struct pci_dev *pdev,
- struct cxl_device_regs *regs,
- struct cxl_register_map *map)
-{
- struct device *dev = &pdev->dev;
- resource_size_t phys_addr;
-
- phys_addr = pci_resource_start(pdev, map->barno);
- phys_addr += map->block_offset;
-
- if (map->device_map.status.valid) {
- resource_size_t addr;
- resource_size_t length;
-
- addr = phys_addr + map->device_map.status.offset;
- length = map->device_map.status.size;
- regs->status = devm_cxl_iomap_block(dev, addr, length);
- if (!regs->status)
- return -ENOMEM;
- }
-
- if (map->device_map.mbox.valid) {
- resource_size_t addr;
- resource_size_t length;
-
- addr = phys_addr + map->device_map.mbox.offset;
- length = map->device_map.mbox.size;
- regs->mbox = devm_cxl_iomap_block(dev, addr, length);
- if (!regs->mbox)
- return -ENOMEM;
- }
-
- if (map->device_map.memdev.valid) {
- resource_size_t addr;
- resource_size_t length;
-
- addr = phys_addr + map->device_map.memdev.offset;
- length = map->device_map.memdev.size;
- regs->memdev = devm_cxl_iomap_block(dev, addr, length);
- if (!regs->memdev)
- return -ENOMEM;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_map_device_regs);
-
-/**
* __cxl_driver_register - register a driver for the cxl bus
* @cxl_drv: cxl driver structure to attach
* @owner: owning module/driver
@@ -1053,12 +633,26 @@ EXPORT_SYMBOL_GPL(cxl_bus_type);
static __init int cxl_core_init(void)
{
- return bus_register(&cxl_bus_type);
+ int rc;
+
+ rc = cxl_memdev_init();
+ if (rc)
+ return rc;
+
+ rc = bus_register(&cxl_bus_type);
+ if (rc)
+ goto err;
+ return 0;
+
+err:
+ cxl_memdev_exit();
+ return rc;
}
static void cxl_core_exit(void)
{
bus_unregister(&cxl_bus_type);
+ cxl_memdev_exit();
}
module_init(cxl_core_init);
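
As a sketch of what "CXL-aware drivers must register with the CXL core" looks like in practice, assuming the struct cxl_driver / cxl_driver_register() interface declared in cxl.h (the names and device id below are illustrative, not part of the patch):

	static int hypothetical_probe(struct device *dev)
	{
		/* bind to a core-instantiated device on cxl_bus_type */
		return 0;
	}

	static struct cxl_driver hypothetical_drv = {
		.name = "hypothetical_cxl",
		.probe = hypothetical_probe,
		.id = CXL_DEVICE_NVDIMM,	/* illustrative device id */
	};
	/* registered from module init via cxl_driver_register(&hypothetical_drv) */
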
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
new file mode 100644
index 000000000000..036a3c8106b4
--- /dev/null
+++ b/drivers/cxl/core/core.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020 Intel Corporation. */
+
+#ifndef __CXL_CORE_H__
+#define __CXL_CORE_H__
+
+extern const struct device_type cxl_nvdimm_bridge_type;
+extern const struct device_type cxl_nvdimm_type;
+
+extern struct attribute_group cxl_base_attribute_group;
+
+static inline void unregister_cxl_dev(void *dev)
+{
+ device_unregister(dev);
+}
+
+int cxl_memdev_init(void);
+void cxl_memdev_exit(void);
+
+#endif /* __CXL_CORE_H__ */
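
unregister_cxl_dev() exists so call sites can hand device_unregister() straight to the devm machinery; typical usage, as in bus.c above:

	rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
	if (rc)
		return ERR_PTR(rc);
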
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
new file mode 100644
index 000000000000..a9c317e32010
--- /dev/null
+++ b/drivers/cxl/core/memdev.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <cxlmem.h>
+#include "core.h"
+
+/*
+ * An entire PCI topology full of devices should be enough for any
+ * config
+ */
+#define CXL_MEM_MAX_DEVS 65536
+
+static int cxl_mem_major;
+static DEFINE_IDA(cxl_memdev_ida);
+
+static void cxl_memdev_release(struct device *dev)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ ida_free(&cxl_memdev_ida, cxlmd->id);
+ kfree(cxlmd);
+}
+
+static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+ kgid_t *gid)
+{
+ return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t payload_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
+}
+static DEVICE_ATTR_RO(payload_max);
+
+static ssize_t label_storage_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
+}
+static DEVICE_ATTR_RO(label_storage_size);
+
+static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->ram_range);
+
+ return sysfs_emit(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_ram_size =
+ __ATTR(size, 0444, ram_size_show, NULL);
+
+static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->pmem_range);
+
+ return sysfs_emit(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_pmem_size =
+ __ATTR(size, 0444, pmem_size_show, NULL);
+
+static struct attribute *cxl_memdev_attributes[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_payload_max.attr,
+ &dev_attr_label_storage_size.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_pmem_attributes[] = {
+ &dev_attr_pmem_size.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_ram_attributes[] = {
+ &dev_attr_ram_size.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_memdev_attribute_group = {
+ .attrs = cxl_memdev_attributes,
+};
+
+static struct attribute_group cxl_memdev_ram_attribute_group = {
+ .name = "ram",
+ .attrs = cxl_memdev_ram_attributes,
+};
+
+static struct attribute_group cxl_memdev_pmem_attribute_group = {
+ .name = "pmem",
+ .attrs = cxl_memdev_pmem_attributes,
+};
+
+static const struct attribute_group *cxl_memdev_attribute_groups[] = {
+ &cxl_memdev_attribute_group,
+ &cxl_memdev_ram_attribute_group,
+ &cxl_memdev_pmem_attribute_group,
+ NULL,
+};
+
+static const struct device_type cxl_memdev_type = {
+ .name = "cxl_memdev",
+ .release = cxl_memdev_release,
+ .devnode = cxl_memdev_devnode,
+ .groups = cxl_memdev_attribute_groups,
+};
+
+static void cxl_memdev_unregister(void *_cxlmd)
+{
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
+ struct cdev *cdev = &cxlmd->cdev;
+ const struct cdevm_file_operations *cdevm_fops;
+
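+	/*
+	 * cdev_init() published &cdevm_fops->fops as cdev->ops, so the
+	 * wrapping cdevm_file_operations is recoverable from the embedded
+	 * fops pointer via container_of().
+	 */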
+ cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
+ cdevm_fops->shutdown(dev);
+
+ cdev_device_del(&cxlmd->cdev, dev);
+ put_device(dev);
+}
+
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
+ const struct file_operations *fops)
+{
+ struct pci_dev *pdev = cxlm->pdev;
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
+ if (!cxlmd)
+ return ERR_PTR(-ENOMEM);
+
+	rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
+ if (rc < 0)
+ goto err;
+ cxlmd->id = rc;
+
+ dev = &cxlmd->dev;
+ device_initialize(dev);
+ dev->parent = &pdev->dev;
+ dev->bus = &cxl_bus_type;
+ dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
+ dev->type = &cxl_memdev_type;
+ device_set_pm_not_required(dev);
+
+ cdev = &cxlmd->cdev;
+ cdev_init(cdev, fops);
+ return cxlmd;
+
+err:
+ kfree(cxlmd);
+ return ERR_PTR(rc);
+}
+
+struct cxl_memdev *
+devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+ const struct cdevm_file_operations *cdevm_fops)
+{
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
+ if (IS_ERR(cxlmd))
+ return cxlmd;
+
+ dev = &cxlmd->dev;
+ rc = dev_set_name(dev, "mem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ /*
+ * Activate ioctl operations, no cxl_memdev_rwsem manipulation
+ * needed as this is ordered with cdev_add() publishing the device.
+ */
+ cxlmd->cxlm = cxlm;
+
+ cdev = &cxlmd->cdev;
+ rc = cdev_device_add(cdev, dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+ if (rc)
+ return ERR_PTR(rc);
+ return cxlmd;
+
+err:
+ /*
+ * The cdev was briefly live, shutdown any ioctl operations that
+ * saw that state.
+ */
+ cdevm_fops->shutdown(dev);
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);
+
+__init int cxl_memdev_init(void)
+{
+ dev_t devt;
+ int rc;
+
+ rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
+ if (rc)
+ return rc;
+
+ cxl_mem_major = MAJOR(devt);
+
+ return 0;
+}
+
+void cxl_memdev_exit(void)
+{
+ unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
+}
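
For orientation, a minimal sketch of a devm_cxl_add_memdev() consumer (my_shutdown, my_cdevm_fops, and my_probe are hypothetical names; the in-tree caller is cxl_mem_probe() in drivers/cxl/pci.c below):

static void my_shutdown(struct device *dev)
{
	/* disconnect driver instance data; the fops must tolerate this */
}

static const struct cdevm_file_operations my_cdevm_fops = {
	.fops = {
		.owner = THIS_MODULE,
		.llseek = noop_llseek,
	},
	.shutdown = my_shutdown,
};

static int my_probe(struct device *host, struct cxl_mem *cxlm)
{
	struct cxl_memdev *cxlmd;

	cxlmd = devm_cxl_add_memdev(host, cxlm, &my_cdevm_fops);
	return IS_ERR(cxlmd) ? PTR_ERR(cxlmd) : 0;
}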
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
new file mode 100644
index 000000000000..d24570f5b8ba
--- /dev/null
+++ b/drivers/cxl/core/pmem.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+/**
+ * DOC: cxl pmem
+ *
+ * The core CXL PMEM infrastructure supports persistent memory
+ * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
+ * 'bridge' device is added at the root of a CXL device topology if
+ * platform firmware advertises at least one persistent memory capable
+ * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
+ * device. Then for each cxl_memdev in the CXL device topology a bridge
+ * device is added to host a LIBNVDIMM dimm object. When these bridges
+ * are registered, native LIBNVDIMM uapis are translated to CXL
+ * operations, for example, namespace label access commands.
+ */
+
+static void cxl_nvdimm_bridge_release(struct device *dev)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
+
+ kfree(cxl_nvb);
+}
+
+static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_nvdimm_bridge_type = {
+ .name = "cxl_nvdimm_bridge",
+ .release = cxl_nvdimm_bridge_release,
+ .groups = cxl_nvdimm_bridge_attribute_groups,
+};
+
+struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
+ "not a cxl_nvdimm_bridge device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_nvdimm_bridge, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
+
+static struct cxl_nvdimm_bridge *
+cxl_nvdimm_bridge_alloc(struct cxl_port *port)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+
+ cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
+ if (!cxl_nvb)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &cxl_nvb->dev;
+ cxl_nvb->port = port;
+ cxl_nvb->state = CXL_NVB_NEW;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = &port->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_nvdimm_bridge_type;
+
+ return cxl_nvb;
+}
+
+static void unregister_nvb(void *_cxl_nvb)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
+ bool flush;
+
+ /*
+ * If the bridge was ever activated then there might be in-flight state
+ * work to flush. Once the state has been changed to 'dead' then no new
+ * work can be queued by user-triggered bind.
+ */
+ device_lock(&cxl_nvb->dev);
+ flush = cxl_nvb->state != CXL_NVB_NEW;
+ cxl_nvb->state = CXL_NVB_DEAD;
+ device_unlock(&cxl_nvb->dev);
+
+ /*
+ * Even though the device core will trigger device_release_driver()
+ * before the unregister, it does not know about the fact that
+ * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
+	 * release now and flush it before tearing down the nvdimm device
+ * hierarchy.
+ */
+ device_release_driver(&cxl_nvb->dev);
+ if (flush)
+ flush_work(&cxl_nvb->state_work);
+ device_unregister(&cxl_nvb->dev);
+}
+
+/**
+ * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
+ * @host: platform firmware root device
+ * @port: CXL port at the root of a CXL topology
+ *
+ * Return: bridge device that can host cxl_nvdimm objects
+ */
+struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_port *port)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_CXL_PMEM))
+ return ERR_PTR(-ENXIO);
+
+ cxl_nvb = cxl_nvdimm_bridge_alloc(port);
+ if (IS_ERR(cxl_nvb))
+ return cxl_nvb;
+
+ dev = &cxl_nvb->dev;
+ rc = dev_set_name(dev, "nvdimm-bridge");
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return cxl_nvb;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
+
+static void cxl_nvdimm_release(struct device *dev)
+{
+ struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
+
+ kfree(cxl_nvd);
+}
+
+static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_nvdimm_type = {
+ .name = "cxl_nvdimm",
+ .release = cxl_nvdimm_release,
+ .groups = cxl_nvdimm_attribute_groups,
+};
+
+bool is_cxl_nvdimm(struct device *dev)
+{
+ return dev->type == &cxl_nvdimm_type;
+}
+EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
+
+struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
+ "not a cxl_nvdimm device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_nvdimm, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
+
+static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ struct device *dev;
+
+ cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
+ if (!cxl_nvd)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &cxl_nvd->dev;
+ cxl_nvd->cxlmd = cxlmd;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = &cxlmd->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_nvdimm_type;
+
+ return cxl_nvd;
+}
+
+/**
+ * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
+ * @host: same host as @cxlmd
+ * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ struct device *dev;
+ int rc;
+
+ cxl_nvd = cxl_nvdimm_alloc(cxlmd);
+ if (IS_ERR(cxl_nvd))
+ return PTR_ERR(cxl_nvd);
+
+ dev = &cxl_nvd->dev;
+ rc = dev_set_name(dev, "pmem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
+ dev_name(dev));
+
+ return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
+
+err:
+ put_device(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
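
A hedged sketch of the intended pairing (my_root_setup and my_memdev_setup are hypothetical names; CONFIG_CXL_PMEM must be enabled or the bridge registration returns -ENXIO): one nvdimm-bridge is added per pmem-capable root port, then each memdev hangs a cxl_nvdimm off the same devres host:

static int my_root_setup(struct device *host, struct cxl_port *root_port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;

	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	return IS_ERR(cxl_nvb) ? PTR_ERR(cxl_nvb) : 0;
}

static int my_memdev_setup(struct device *host, struct cxl_memdev *cxlmd)
{
	/* registers a 'pmem%d' device on the cxl bus */
	return devm_cxl_add_nvdimm(host, cxlmd);
}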
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
new file mode 100644
index 000000000000..41de4a136ecd
--- /dev/null
+++ b/drivers/cxl/core/regs.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <cxlmem.h>
+
+/**
+ * DOC: cxl registers
+ *
+ * CXL device capabilities are enumerated by PCI DVSEC (Designated
+ * Vendor-specific) and/or descriptors provided by platform firmware.
+ * They can be defined as a set like the device and component registers
+ * mandated by CXL Section 8.1.12.2 Memory Device PCIe Capabilities and
+ * Extended Capabilities, or they can be individual capabilities
+ * appended to bridged and endpoint devices.
+ *
+ * Provide common infrastructure for enumerating and mapping these
+ * discrete capabilities.
+ */
+
+/**
+ * cxl_probe_component_regs() - Detect CXL Component register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping containing the HDM Decoder Capability Header
+ * @map: Map object describing the register block information found
+ *
+ * See CXL 2.0 8.2.4 Component Register Layout and Definition
+ * See CXL 2.0 8.2.5.5 CXL Device Register Interface
+ *
+ * Probe for component register information and return it in map object.
+ */
+void cxl_probe_component_regs(struct device *dev, void __iomem *base,
+ struct cxl_component_reg_map *map)
+{
+ int cap, cap_count;
+ u64 cap_array;
+
+ *map = (struct cxl_component_reg_map) { 0 };
+
+ /*
+ * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
+ * CXL 2.0 8.2.4 Table 141.
+ */
+ base += CXL_CM_OFFSET;
+
+ cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
+
+ if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
+		dev_err(dev,
+			"Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
+ return;
+ }
+
+ /* It's assumed that future versions will be backward compatible */
+ cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ void __iomem *register_block;
+ u32 hdr;
+ int decoder_cnt;
+ u16 cap_id, offset;
+ u32 length;
+
+ hdr = readl(base + cap * 0x4);
+
+ cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
+ offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
+ register_block = base + offset;
+
+ switch (cap_id) {
+ case CXL_CM_CAP_CAP_ID_HDM:
+ dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
+ offset);
+
+ hdr = readl(register_block);
+
+ decoder_cnt = cxl_hdm_decoder_count(hdr);
+ length = 0x20 * decoder_cnt + 0x10;
+
+ map->hdm_decoder.valid = true;
+ map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
+ map->hdm_decoder.size = length;
+ break;
+ default:
+ dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
+ offset);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
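+
+/*
+ * Sizing example for the HDM decoder block above: a device reporting 10
+ * decoders maps length = 0x20 * 10 + 0x10 = 0x150 bytes of registers.
+ */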
+
+/**
+ * cxl_probe_device_regs() - Detect CXL Device register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
+ * @map: Map object describing the register block information found
+ *
+ * Probe for device register information and return it in map object.
+ */
+void cxl_probe_device_regs(struct device *dev, void __iomem *base,
+ struct cxl_device_reg_map *map)
+{
+ int cap, cap_count;
+ u64 cap_array;
+
+ *map = (struct cxl_device_reg_map){ 0 };
+
+ cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
+ if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
+ CXLDEV_CAP_ARRAY_CAP_ID)
+ return;
+
+ cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ u32 offset, length;
+ u16 cap_id;
+
+ cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
+ readl(base + cap * 0x10));
+ offset = readl(base + cap * 0x10 + 0x4);
+ length = readl(base + cap * 0x10 + 0x8);
+
+ switch (cap_id) {
+ case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
+ dev_dbg(dev, "found Status capability (0x%x)\n", offset);
+
+ map->status.valid = true;
+ map->status.offset = offset;
+ map->status.size = length;
+ break;
+ case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
+ dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
+ map->mbox.valid = true;
+ map->mbox.offset = offset;
+ map->mbox.size = length;
+ break;
+ case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
+ dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
+ break;
+ case CXLDEV_CAP_CAP_ID_MEMDEV:
+ dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
+ map->memdev.valid = true;
+ map->memdev.offset = offset;
+ map->memdev.size = length;
+ break;
+ default:
+ if (cap_id >= 0x8000)
+ dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
+ else
+ dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
+
+static void __iomem *devm_cxl_iomap_block(struct device *dev,
+ resource_size_t addr,
+ resource_size_t length)
+{
+ void __iomem *ret_val;
+ struct resource *res;
+
+ res = devm_request_mem_region(dev, addr, length, dev_name(dev));
+ if (!res) {
+ resource_size_t end = addr + length - 1;
+
+ dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
+ return NULL;
+ }
+
+ ret_val = devm_ioremap(dev, addr, length);
+ if (!ret_val)
+ dev_err(dev, "Failed to map region %pr\n", res);
+
+ return ret_val;
+}
+
+int cxl_map_component_regs(struct pci_dev *pdev,
+ struct cxl_component_regs *regs,
+ struct cxl_register_map *map)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t phys_addr;
+ resource_size_t length;
+
+ phys_addr = pci_resource_start(pdev, map->barno);
+ phys_addr += map->block_offset;
+
+ phys_addr += map->component_map.hdm_decoder.offset;
+ length = map->component_map.hdm_decoder.size;
+ regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
+ if (!regs->hdm_decoder)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_component_regs);
+
+int cxl_map_device_regs(struct pci_dev *pdev,
+ struct cxl_device_regs *regs,
+ struct cxl_register_map *map)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t phys_addr;
+
+ phys_addr = pci_resource_start(pdev, map->barno);
+ phys_addr += map->block_offset;
+
+ if (map->device_map.status.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.status.offset;
+ length = map->device_map.status.size;
+ regs->status = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->status)
+ return -ENOMEM;
+ }
+
+ if (map->device_map.mbox.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.mbox.offset;
+ length = map->device_map.mbox.size;
+ regs->mbox = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->mbox)
+ return -ENOMEM;
+ }
+
+ if (map->device_map.memdev.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.memdev.offset;
+ length = map->device_map.memdev.size;
+ regs->memdev = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->memdev)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_device_regs);
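
A condensed sketch of the probe-then-map sequence for a device register block (my_setup_device_regs is a hypothetical helper; the caller fills in map->barno and map->block_offset and passes a temporary mapping in base, as the PCI driver below does):

static int my_setup_device_regs(struct pci_dev *pdev, void __iomem *base,
				struct cxl_register_map *map,
				struct cxl_device_regs *regs)
{
	/* enumerate status/mbox/memdev capabilities into map->device_map */
	cxl_probe_device_regs(&pdev->dev, base, &map->device_map);

	/* then build long-lived devm mappings from the recorded offsets */
	return cxl_map_device_regs(pdev, regs, map);
}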
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index b6bda39a59e3..53927f9fa77e 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -140,7 +140,6 @@ struct cxl_device_reg_map {
};
struct cxl_register_map {
- struct list_head list;
u64 block_offset;
u8 reg_type;
u8 barno;
diff --git a/drivers/cxl/mem.h b/drivers/cxl/cxlmem.h
index 8f02d02b26b4..6c0b1e2ea97c 100644
--- a/drivers/cxl/mem.h
+++ b/drivers/cxl/cxlmem.h
@@ -28,11 +28,20 @@
(FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
CXLMDEV_RESET_NEEDED_NOT)
-/*
- * An entire PCI topology full of devices should be enough for any
- * config
+/**
+ * struct cdevm_file_operations - devm coordinated cdev file operations
+ * @fops: file operations that are synchronized against @shutdown
+ * @shutdown: disconnect driver data
+ *
+ * @shutdown is invoked in the devres release path to disconnect any
+ * driver instance data from @dev. It assumes synchronization with any
+ * fops operation that requires driver data. After @shutdown an
+ * operation may only reference @device data.
*/
-#define CXL_MEM_MAX_DEVS 65536
+struct cdevm_file_operations {
+ struct file_operations fops;
+ void (*shutdown)(struct device *dev);
+};
/**
* struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
@@ -48,6 +57,15 @@ struct cxl_memdev {
int id;
};
+static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
+{
+ return container_of(dev, struct cxl_memdev, dev);
+}
+
+struct cxl_memdev *
+devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+ const struct cdevm_file_operations *cdevm_fops);
+
/**
* struct cxl_mem - A CXL memory device
* @pdev: The PCI device associated with this CXL device.
@@ -77,5 +95,14 @@ struct cxl_mem {
struct range pmem_range;
struct range ram_range;
+ u64 total_bytes;
+ u64 volatile_only_bytes;
+ u64 persistent_only_bytes;
+ u64 partition_align_bytes;
+
+ u64 active_volatile_bytes;
+ u64 active_persistent_bytes;
+ u64 next_volatile_bytes;
+ u64 next_persistent_bytes;
};
#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 4cf351a3cf99..8e45aa07d662 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -12,9 +12,9 @@
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include "cxlmem.h"
#include "pci.h"
#include "cxl.h"
-#include "mem.h"
/**
* DOC: cxl pci
@@ -64,6 +64,15 @@ enum opcode {
CXL_MBOX_OP_MAX = 0x10000
};
+/*
+ * CXL 2.0 - Memory capacity multiplier
+ * See Section 8.2.9.5
+ *
+ * Volatile, Persistent, and Partition capacities are specified to be in
+ * multiples of 256MB - define a multiplier to convert to/from bytes.
+ */
+#define CXL_CAPACITY_MULTIPLIER SZ_256M
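+/* e.g. an Identify capacity field of 0x10 decodes to 0x10 * SZ_256M = 4GB */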
+
/**
* struct mbox_cmd - A command to be submitted to hardware.
* @opcode: (input) The command set and command submitted to hardware.
@@ -94,8 +103,6 @@ struct mbox_cmd {
#define CXL_MBOX_SUCCESS 0
};
-static int cxl_mem_major;
-static DEFINE_IDA(cxl_memdev_ida);
static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;
@@ -568,7 +575,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
return false;
- if (security_locked_down(LOCKDOWN_NONE))
+ if (security_locked_down(LOCKDOWN_PCI_ACCESS))
return false;
if (cxl_raw_allow_all)
@@ -806,13 +813,25 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file)
return 0;
}
-static const struct file_operations cxl_memdev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = cxl_memdev_ioctl,
- .open = cxl_memdev_open,
- .release = cxl_memdev_release_file,
- .compat_ioctl = compat_ptr_ioctl,
- .llseek = noop_llseek,
+static void cxl_memdev_shutdown(struct device *dev)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ down_write(&cxl_memdev_rwsem);
+ cxlmd->cxlm = NULL;
+ up_write(&cxl_memdev_rwsem);
+}
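+
+/*
+ * The ioctl path (not shown in this hunk) takes cxl_memdev_rwsem for read
+ * and bails out once cxlmd->cxlm is NULL, so a shutdown memdev quiesces
+ * in-flight operations before teardown proceeds.
+ */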
+
+static const struct cdevm_file_operations cxl_memdev_fops = {
+ .fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = cxl_memdev_ioctl,
+ .open = cxl_memdev_open,
+ .release = cxl_memdev_release_file,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+ },
+ .shutdown = cxl_memdev_shutdown,
};
static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
@@ -1022,8 +1041,8 @@ static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base,
!dev_map->memdev.valid) {
dev_err(dev, "registers not found: %s%s%s\n",
!dev_map->status.valid ? "status " : "",
- !dev_map->mbox.valid ? "status " : "",
- !dev_map->memdev.valid ? "status " : "");
+ !dev_map->mbox.valid ? "mbox " : "",
+ !dev_map->memdev.valid ? "memdev " : "");
return -ENXIO;
}
@@ -1081,9 +1100,8 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
struct device *dev = &pdev->dev;
u32 regloc_size, regblocks;
void __iomem *base;
- int regloc, i;
- struct cxl_register_map *map, *n;
- LIST_HEAD(register_maps);
+ int regloc, i, n_maps;
+ struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES];
int ret = 0;
regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
@@ -1102,20 +1120,12 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
- for (i = 0; i < regblocks; i++, regloc += 8) {
+ for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) {
u32 reg_lo, reg_hi;
u8 reg_type;
u64 offset;
u8 bar;
- map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map) {
- ret = -ENOMEM;
- goto free_maps;
- }
-
- list_add(&map->list, &register_maps);
-
pci_read_config_dword(pdev, regloc, &reg_lo);
pci_read_config_dword(pdev, regloc + 4, &reg_hi);
@@ -1125,12 +1135,15 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
bar, offset, reg_type);
+ /* Ignore unknown register block types */
+ if (reg_type > CXL_REGLOC_RBI_MEMDEV)
+ continue;
+
base = cxl_mem_map_regblock(cxlm, bar, offset);
- if (!base) {
- ret = -ENOMEM;
- goto free_maps;
- }
+ if (!base)
+ return -ENOMEM;
+ map = &maps[n_maps];
map->barno = bar;
map->block_offset = offset;
map->reg_type = reg_type;
@@ -1141,240 +1154,22 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
cxl_mem_unmap_regblock(cxlm, base);
if (ret)
- goto free_maps;
+ return ret;
+
+ n_maps++;
}
pci_release_mem_regions(pdev);
- list_for_each_entry(map, &register_maps, list) {
- ret = cxl_map_regs(cxlm, map);
+ for (i = 0; i < n_maps; i++) {
+ ret = cxl_map_regs(cxlm, &maps[i]);
if (ret)
- goto free_maps;
- }
-
-free_maps:
- list_for_each_entry_safe(map, n, &register_maps, list) {
- list_del(&map->list);
- kfree(map);
+ break;
}
return ret;
}
-static struct cxl_memdev *to_cxl_memdev(struct device *dev)
-{
- return container_of(dev, struct cxl_memdev, dev);
-}
-
-static void cxl_memdev_release(struct device *dev)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-
- ida_free(&cxl_memdev_ida, cxlmd->id);
- kfree(cxlmd);
-}
-
-static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
- kgid_t *gid)
-{
- return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
-}
-
-static ssize_t firmware_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
-
- return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
-}
-static DEVICE_ATTR_RO(firmware_version);
-
-static ssize_t payload_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
-
- return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
-}
-static DEVICE_ATTR_RO(payload_max);
-
-static ssize_t label_storage_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
-
- return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
-}
-static DEVICE_ATTR_RO(label_storage_size);
-
-static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->ram_range);
-
- return sysfs_emit(buf, "%#llx\n", len);
-}
-
-static struct device_attribute dev_attr_ram_size =
- __ATTR(size, 0444, ram_size_show, NULL);
-
-static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->pmem_range);
-
- return sysfs_emit(buf, "%#llx\n", len);
-}
-
-static struct device_attribute dev_attr_pmem_size =
- __ATTR(size, 0444, pmem_size_show, NULL);
-
-static struct attribute *cxl_memdev_attributes[] = {
- &dev_attr_firmware_version.attr,
- &dev_attr_payload_max.attr,
- &dev_attr_label_storage_size.attr,
- NULL,
-};
-
-static struct attribute *cxl_memdev_pmem_attributes[] = {
- &dev_attr_pmem_size.attr,
- NULL,
-};
-
-static struct attribute *cxl_memdev_ram_attributes[] = {
- &dev_attr_ram_size.attr,
- NULL,
-};
-
-static struct attribute_group cxl_memdev_attribute_group = {
- .attrs = cxl_memdev_attributes,
-};
-
-static struct attribute_group cxl_memdev_ram_attribute_group = {
- .name = "ram",
- .attrs = cxl_memdev_ram_attributes,
-};
-
-static struct attribute_group cxl_memdev_pmem_attribute_group = {
- .name = "pmem",
- .attrs = cxl_memdev_pmem_attributes,
-};
-
-static const struct attribute_group *cxl_memdev_attribute_groups[] = {
- &cxl_memdev_attribute_group,
- &cxl_memdev_ram_attribute_group,
- &cxl_memdev_pmem_attribute_group,
- NULL,
-};
-
-static const struct device_type cxl_memdev_type = {
- .name = "cxl_memdev",
- .release = cxl_memdev_release,
- .devnode = cxl_memdev_devnode,
- .groups = cxl_memdev_attribute_groups,
-};
-
-static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
-{
- down_write(&cxl_memdev_rwsem);
- cxlmd->cxlm = NULL;
- up_write(&cxl_memdev_rwsem);
-}
-
-static void cxl_memdev_unregister(void *_cxlmd)
-{
- struct cxl_memdev *cxlmd = _cxlmd;
- struct device *dev = &cxlmd->dev;
-
- cdev_device_del(&cxlmd->cdev, dev);
- cxl_memdev_shutdown(cxlmd);
- put_device(dev);
-}
-
-static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
-{
- struct pci_dev *pdev = cxlm->pdev;
- struct cxl_memdev *cxlmd;
- struct device *dev;
- struct cdev *cdev;
- int rc;
-
- cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
- if (!cxlmd)
- return ERR_PTR(-ENOMEM);
-
- rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
- if (rc < 0)
- goto err;
- cxlmd->id = rc;
-
- dev = &cxlmd->dev;
- device_initialize(dev);
- dev->parent = &pdev->dev;
- dev->bus = &cxl_bus_type;
- dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
- dev->type = &cxl_memdev_type;
- device_set_pm_not_required(dev);
-
- cdev = &cxlmd->cdev;
- cdev_init(cdev, &cxl_memdev_fops);
- return cxlmd;
-
-err:
- kfree(cxlmd);
- return ERR_PTR(rc);
-}
-
-static struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
- struct cxl_mem *cxlm)
-{
- struct cxl_memdev *cxlmd;
- struct device *dev;
- struct cdev *cdev;
- int rc;
-
- cxlmd = cxl_memdev_alloc(cxlm);
- if (IS_ERR(cxlmd))
- return cxlmd;
-
- dev = &cxlmd->dev;
- rc = dev_set_name(dev, "mem%d", cxlmd->id);
- if (rc)
- goto err;
-
- /*
- * Activate ioctl operations, no cxl_memdev_rwsem manipulation
- * needed as this is ordered with cdev_add() publishing the device.
- */
- cxlmd->cxlm = cxlm;
-
- cdev = &cxlmd->cdev;
- rc = cdev_device_add(cdev, dev);
- if (rc)
- goto err;
-
- rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
- if (rc)
- return ERR_PTR(rc);
- return cxlmd;
-
-err:
- /*
- * The cdev was briefly live, shutdown any ioctl operations that
- * saw that state.
- */
- cxl_memdev_shutdown(cxlmd);
- put_device(dev);
- return ERR_PTR(rc);
-}
-
static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
{
u32 remaining = size;
@@ -1469,6 +1264,53 @@ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
}
/**
+ * cxl_mem_get_partition_info() - Get partition info
+ * @cxlm: The device to act on
+ * @active_volatile_bytes: returned active volatile capacity
+ * @active_persistent_bytes: returned active persistent capacity
+ * @next_volatile_bytes: returned next volatile capacity
+ * @next_persistent_bytes: returned next persistent capacity
+ *
+ * Retrieve the current partition info for the device specified. If not 0, the
+ * 'next' values are pending and take effect on the next cold reset.
+ *
+ * Return: 0 if no error; otherwise the result of the mailbox command.
+ *
+ * See CXL 2.0 8.2.9.5.2.1 Get Partition Info
+ */
+static int cxl_mem_get_partition_info(struct cxl_mem *cxlm,
+ u64 *active_volatile_bytes,
+ u64 *active_persistent_bytes,
+ u64 *next_volatile_bytes,
+ u64 *next_persistent_bytes)
+{
+ struct cxl_mbox_get_partition_info {
+ __le64 active_volatile_cap;
+ __le64 active_persistent_cap;
+ __le64 next_volatile_cap;
+ __le64 next_persistent_cap;
+ } __packed pi;
+ int rc;
+
+ rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
+ NULL, 0, &pi, sizeof(pi));
+ if (rc)
+ return rc;
+
+ *active_volatile_bytes = le64_to_cpu(pi.active_volatile_cap);
+ *active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap);
+ *next_volatile_bytes = le64_to_cpu(pi.next_volatile_cap);
+	*next_persistent_bytes = le64_to_cpu(pi.next_persistent_cap);
+
+ *active_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
+ *active_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;
+ *next_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
+ *next_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ return 0;
+}
+
+/**
* cxl_mem_enumerate_cmds() - Enumerate commands for a device.
* @cxlm: The device.
*
@@ -1564,16 +1406,27 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
if (rc < 0)
return rc;
- /*
- * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
- * For now, only the capacity is exported in sysfs
- */
- cxlm->ram_range.start = 0;
- cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
+ cxlm->total_bytes = le64_to_cpu(id.total_capacity);
+ cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity);
+ cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER;
- cxlm->pmem_range.start = 0;
- cxlm->pmem_range.end =
- le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
+ cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity);
+ cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ cxlm->partition_align_bytes = le64_to_cpu(id.partition_align);
+ cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ dev_dbg(&cxlm->pdev->dev, "Identify Memory Device\n"
+ " total_bytes = %#llx\n"
+ " volatile_only_bytes = %#llx\n"
+ " persistent_only_bytes = %#llx\n"
+ " partition_align_bytes = %#llx\n",
+ cxlm->total_bytes,
+ cxlm->volatile_only_bytes,
+ cxlm->persistent_only_bytes,
+ cxlm->partition_align_bytes);
cxlm->lsa_size = le32_to_cpu(id.lsa_size);
memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
@@ -1581,6 +1434,49 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
return 0;
}
+static int cxl_mem_create_range_info(struct cxl_mem *cxlm)
+{
+ int rc;
+
+ if (cxlm->partition_align_bytes == 0) {
+ cxlm->ram_range.start = 0;
+ cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
+ cxlm->pmem_range.start = cxlm->volatile_only_bytes;
+ cxlm->pmem_range.end = cxlm->volatile_only_bytes +
+ cxlm->persistent_only_bytes - 1;
+ return 0;
+ }
+
+ rc = cxl_mem_get_partition_info(cxlm,
+ &cxlm->active_volatile_bytes,
+ &cxlm->active_persistent_bytes,
+ &cxlm->next_volatile_bytes,
+ &cxlm->next_persistent_bytes);
+ if (rc < 0) {
+ dev_err(&cxlm->pdev->dev, "Failed to query partition information\n");
+ return rc;
+ }
+
+ dev_dbg(&cxlm->pdev->dev, "Get Partition Info\n"
+ " active_volatile_bytes = %#llx\n"
+ " active_persistent_bytes = %#llx\n"
+ " next_volatile_bytes = %#llx\n"
+ " next_persistent_bytes = %#llx\n",
+ cxlm->active_volatile_bytes,
+ cxlm->active_persistent_bytes,
+ cxlm->next_volatile_bytes,
+ cxlm->next_persistent_bytes);
+
+ cxlm->ram_range.start = 0;
+ cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;
+
+ cxlm->pmem_range.start = cxlm->active_volatile_bytes;
+ cxlm->pmem_range.end = cxlm->active_volatile_bytes +
+ cxlm->active_persistent_bytes - 1;
+
+ return 0;
+}
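+
+/*
+ * Example: a partitionable device with 1GB active volatile and 1GB active
+ * persistent capacity yields ram_range [0, 1GB - 1] and pmem_range
+ * [1GB, 2GB - 1]; the DPA ranges are carved out back-to-back.
+ */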
+
static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_memdev *cxlmd;
@@ -1611,7 +1507,11 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm);
+ rc = cxl_mem_create_range_info(cxlm);
+ if (rc)
+ return rc;
+
+ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
@@ -1640,25 +1540,15 @@ static struct pci_driver cxl_mem_driver = {
static __init int cxl_mem_init(void)
{
struct dentry *mbox_debugfs;
- dev_t devt;
int rc;
/* Double check the anonymous union trickery in struct cxl_regs */
BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
offsetof(struct cxl_regs, device_regs.memdev));
- rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
- if (rc)
- return rc;
-
- cxl_mem_major = MAJOR(devt);
-
rc = pci_register_driver(&cxl_mem_driver);
- if (rc) {
- unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
- CXL_MEM_MAX_DEVS);
+ if (rc)
return rc;
- }
cxl_debugfs = debugfs_create_dir("cxl", NULL);
mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
@@ -1672,7 +1562,6 @@ static __exit void cxl_mem_exit(void)
{
debugfs_remove_recursive(cxl_debugfs);
pci_unregister_driver(&cxl_mem_driver);
- unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h
index dad7a831f65f..8c1a58813816 100644
--- a/drivers/cxl/pci.h
+++ b/drivers/cxl/pci.h
@@ -25,6 +25,7 @@
#define CXL_REGLOC_RBI_COMPONENT 1
#define CXL_REGLOC_RBI_VIRT 2
#define CXL_REGLOC_RBI_MEMDEV 3
+#define CXL_REGLOC_RBI_TYPES (CXL_REGLOC_RBI_MEMDEV + 1)
#define CXL_REGLOC_ADDR_MASK GENMASK(31, 16)
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 0088e41dd2f3..9652c3ee41e7 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -6,7 +6,7 @@
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
-#include "mem.h"
+#include "cxlmem.h"
#include "cxl.h"
/*
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index ac231cc36359..a37622060fff 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -37,15 +37,16 @@ static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
struct dax_kmem_data {
const char *res_name;
+ int mgid;
struct resource *res[];
};
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
+ unsigned long total_len = 0;
struct dax_kmem_data *data;
- int rc = -ENOMEM;
- int i, mapped = 0;
+ int i, rc, mapped = 0;
int numa_node;
/*
@@ -61,24 +62,44 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return -EINVAL;
}
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct range range;
+
+ rc = dax_kmem_range(dev_dax, i, &range);
+ if (rc) {
+ dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
+ i, range.start, range.end);
+ continue;
+ }
+ total_len += range_len(&range);
+ }
+
+ if (!total_len) {
+ dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
+ return -EINVAL;
+ }
+
data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ rc = -ENOMEM;
data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!data->res_name)
goto err_res_name;
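+
+	/* memory_group_register_static() returns the new group id on success */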
+ rc = memory_group_register_static(numa_node, total_len);
+ if (rc < 0)
+ goto err_reg_mgid;
+ data->mgid = rc;
+
for (i = 0; i < dev_dax->nr_range; i++) {
struct resource *res;
struct range range;
rc = dax_kmem_range(dev_dax, i, &range);
- if (rc) {
- dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
- i, range.start, range.end);
+ if (rc)
continue;
- }
/* Region is permanently reserved if hotremove fails. */
res = request_mem_region(range.start, range_len(&range), data->res_name);
@@ -108,8 +129,8 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
* Ensure that future kexec'd kernels will not treat
* this as RAM automatically.
*/
- rc = add_memory_driver_managed(numa_node, range.start,
- range_len(&range), kmem_name, MHP_NONE);
+ rc = add_memory_driver_managed(data->mgid, range.start,
+ range_len(&range), kmem_name, MHP_NID_IS_MGID);
if (rc) {
dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
@@ -129,6 +150,8 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return 0;
err_request_mem:
+ memory_group_unregister(data->mgid);
+err_reg_mgid:
kfree(data->res_name);
err_res_name:
kfree(data);
@@ -156,8 +179,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
if (rc)
continue;
- rc = remove_memory(dev_dax->target_node, range.start,
- range_len(&range));
+ rc = remove_memory(range.start, range_len(&range));
if (rc == 0) {
release_resource(data->res[i]);
kfree(data->res[i]);
@@ -172,6 +194,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
}
if (success >= dev_dax->nr_range) {
+ memory_group_unregister(data->mgid);
kfree(data->res_name);
kfree(data);
dev_set_drvdata(dev, NULL);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 44736cbd446e..fc89e91beea7 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -17,6 +17,24 @@
#include <linux/fs.h>
#include "dax-private.h"
+/**
+ * struct dax_device - anchor object for dax services
+ * @inode: core vfs
+ * @cdev: optional character interface for "device dax"
+ * @host: optional name for lookups where the device path is not available
+ * @private: dax driver private data
+ * @flags: state and boolean properties
+ */
+struct dax_device {
+ struct hlist_node list;
+ struct inode inode;
+ struct cdev cdev;
+ const char *host;
+ void *private;
+ unsigned long flags;
+ const struct dax_operations *ops;
+};
+
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
@@ -40,6 +58,42 @@ void dax_read_unlock(int id)
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
+static int dax_host_hash(const char *host)
+{
+ return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
+}
+
+/**
+ * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
+ * @host: alternate name for the device registered by a dax driver
+ */
+static struct dax_device *dax_get_by_host(const char *host)
+{
+ struct dax_device *dax_dev, *found = NULL;
+ int hash, id;
+
+ if (!host)
+ return NULL;
+
+ hash = dax_host_hash(host);
+
+ id = dax_read_lock();
+ spin_lock(&dax_host_lock);
+ hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
+ if (!dax_alive(dax_dev)
+ || strcmp(host, dax_dev->host) != 0)
+ continue;
+
+ if (igrab(&dax_dev->inode))
+ found = dax_dev;
+ break;
+ }
+ spin_unlock(&dax_host_lock);
+ dax_read_unlock(id);
+
+ return found;
+}
+
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
@@ -65,15 +119,13 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
-#endif
-bool __generic_fsdax_supported(struct dax_device *dax_dev,
+bool generic_fsdax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors)
{
bool dax_enabled = false;
pgoff_t pgoff, pgoff_end;
- char buf[BDEVNAME_SIZE];
void *kaddr, *end_kaddr;
pfn_t pfn, end_pfn;
sector_t last_page;
@@ -81,29 +133,25 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
int err, id;
if (blocksize != PAGE_SIZE) {
- pr_info("%s: error: unsupported blocksize for dax\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
return false;
}
if (!dax_dev) {
- pr_debug("%s: error: dax unsupported by block device\n",
- bdevname(bdev, buf));
+ pr_debug("%pg: error: dax unsupported by block device\n", bdev);
return false;
}
err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
if (err) {
- pr_info("%s: error: unaligned partition for dax\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: unaligned partition for dax\n", bdev);
return false;
}
last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
if (err) {
- pr_info("%s: error: unaligned partition for dax\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: unaligned partition for dax\n", bdev);
return false;
}
@@ -112,8 +160,8 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
if (len < 1 || len2 < 1) {
- pr_info("%s: error: dax access failed (%ld)\n",
- bdevname(bdev, buf), len < 1 ? len : len2);
+ pr_info("%pg: error: dax access failed (%ld)\n",
+ bdev, len < 1 ? len : len2);
dax_read_unlock(id);
return false;
}
@@ -147,57 +195,32 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
dax_read_unlock(id);
if (!dax_enabled) {
- pr_info("%s: error: dax support not enabled\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: dax support not enabled\n", bdev);
return false;
}
return true;
}
-EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
+EXPORT_SYMBOL_GPL(generic_fsdax_supported);
-/**
- * __bdev_dax_supported() - Check if the device supports dax for filesystem
- * @bdev: block device to check
- * @blocksize: The block size of the device
- *
- * This is a library function for filesystems to check if the block device
- * can be mounted with dax option.
- *
- * Return: true if supported, false if unsupported
- */
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+ int blocksize, sector_t start, sector_t len)
{
- struct dax_device *dax_dev;
- struct request_queue *q;
- char buf[BDEVNAME_SIZE];
- bool ret;
+ bool ret = false;
int id;
- q = bdev_get_queue(bdev);
- if (!q || !blk_queue_dax(q)) {
- pr_debug("%s: error: request queue doesn't support dax\n",
- bdevname(bdev, buf));
- return false;
- }
-
- dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
- if (!dax_dev) {
- pr_debug("%s: error: device does not support dax\n",
- bdevname(bdev, buf));
+ if (!dax_dev)
return false;
- }
id = dax_read_lock();
- ret = dax_supported(dax_dev, bdev, blocksize, 0,
- i_size_read(bdev->bd_inode) / 512);
+ if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
+ ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
+ start, len);
dax_read_unlock(id);
-
- put_dax(dax_dev);
-
return ret;
}
-EXPORT_SYMBOL_GPL(__bdev_dax_supported);
-#endif
+EXPORT_SYMBOL_GPL(dax_supported);
+#endif /* CONFIG_FS_DAX */
+#endif /* CONFIG_BLOCK */
enum dax_device_flags {
/* !alive + rcu grace period == no new operations / mappings */
@@ -208,24 +231,6 @@ enum dax_device_flags {
DAXDEV_SYNC,
};
-/**
- * struct dax_device - anchor object for dax services
- * @inode: core vfs
- * @cdev: optional character interface for "device dax"
- * @host: optional name for lookups where the device path is not available
- * @private: dax driver private data
- * @flags: state and boolean properties
- */
-struct dax_device {
- struct hlist_node list;
- struct inode inode;
- struct cdev cdev;
- const char *host;
- void *private;
- unsigned long flags;
- const struct dax_operations *ops;
-};
-
static ssize_t write_cache_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -323,19 +328,6 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
}
EXPORT_SYMBOL_GPL(dax_direct_access);
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len)
-{
- if (!dax_dev)
- return false;
-
- if (!dax_alive(dax_dev))
- return false;
-
- return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
-}
-EXPORT_SYMBOL_GPL(dax_supported);
-
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
{
@@ -423,11 +415,6 @@ bool dax_alive(struct dax_device *dax_dev)
}
EXPORT_SYMBOL_GPL(dax_alive);
-static int dax_host_hash(const char *host)
-{
- return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
-}
-
/*
* Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
* that any fault handlers or operations that might have seen
@@ -625,38 +612,6 @@ void put_dax(struct dax_device *dax_dev)
EXPORT_SYMBOL_GPL(put_dax);
/**
- * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
- * @host: alternate name for the device registered by a dax driver
- */
-struct dax_device *dax_get_by_host(const char *host)
-{
- struct dax_device *dax_dev, *found = NULL;
- int hash, id;
-
- if (!host)
- return NULL;
-
- hash = dax_host_hash(host);
-
- id = dax_read_lock();
- spin_lock(&dax_host_lock);
- hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
- if (!dax_alive(dax_dev)
- || strcmp(host, dax_dev->host) != 0)
- continue;
-
- if (igrab(&dax_dev->inode))
- found = dax_dev;
- break;
- }
- spin_unlock(&dax_host_lock);
- dax_read_unlock(id);
-
- return found;
-}
-EXPORT_SYMBOL_GPL(dax_get_by_host);
-
-/**
* inode_dax: convert a public inode into its dax_dev
* @inode: An inode with i_cdev pointing to a dax_dev
*
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 28f3e0ba6cdd..85faa7a5c7d1 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -27,6 +27,7 @@
#include <linux/hrtimer.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
+#include <linux/units.h>
#include "governor.h"
#define CREATE_TRACE_POINTS
@@ -34,7 +35,6 @@
#define IS_SUPPORTED_FLAG(f, name) ((f & DEVFREQ_GOV_FLAG_##name) ? true : false)
#define IS_SUPPORTED_ATTR(f, name) ((f & DEVFREQ_GOV_ATTR_##name) ? true : false)
-#define HZ_PER_KHZ 1000
static struct class *devfreq_class;
static struct dentry *devfreq_debugfs;
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 9561e3d2d428..541efe01abc7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -42,6 +42,7 @@ config UDMABUF
config DMABUF_MOVE_NOTIFY
bool "Move notify between drivers (EXPERIMENTAL)"
default n
+ depends on DMA_SHARED_BUFFER
help
	  Don't pin buffers if the dynamic DMA-buf interface is available on
	  both the exporter and the importer. This fixes a security
@@ -52,6 +53,7 @@ config DMABUF_MOVE_NOTIFY
config DMABUF_DEBUG
bool "DMA-BUF debug checks"
+ depends on DMA_SHARED_BUFFER
default y if DMA_API_DEBUG
help
This option enables additional checks for DMA-BUF importers and
@@ -74,7 +76,7 @@ menuconfig DMABUF_HEAPS
menuconfig DMABUF_SYSFS_STATS
bool "DMA-BUF sysfs statistics"
- select DMA_SHARED_BUFFER
+ depends on DMA_SHARED_BUFFER
help
Choose this option to enable DMA-BUF sysfs statistics
in location /sys/kernel/dmabuf/buffers.
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 39b5b46e880f..80c2c03cb014 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -277,10 +277,15 @@ config INTEL_IDMA64
Enable DMA support for Intel Low Power Subsystem such as found on
Intel Skylake PCH.
+config INTEL_IDXD_BUS
+ tristate
+ default INTEL_IDXD
+
config INTEL_IDXD
tristate "Intel Data Accelerators support"
- depends on PCI && X86_64
+ depends on PCI && X86_64 && !UML
depends on PCI_MSI
+ depends on PCI_PASID
depends on SBITMAP
select DMA_ENGINE
help
@@ -291,6 +296,23 @@ config INTEL_IDXD
If unsure, say N.
+config INTEL_IDXD_COMPAT
+ bool "Legacy behavior for idxd driver"
+ depends on PCI && X86_64
+ select INTEL_IDXD_BUS
+ help
+	  Compatible driver to support old /sys/bus/dsa/drivers/dsa behavior.
+	  The old behavior performed driver bind/unbind for device and wq
+	  devices all under the dsa driver. The compat driver will emulate
+	  the legacy behavior in order to allow existing user space tools
+	  (e.g. accel-config) to continue to function. It is expected that
+	  accel-config v3.2 and earlier will need the compat mode. A distro
+	  with a later accel-config version can disable this compat config.
+
+ Say Y if you have old applications that require such behavior.
+
+ If unsure, say N.
+
# Config symbol that collects all the dependencies that's necessary to
# support shared virtual memory for the devices supported by idxd.
config INTEL_IDXD_SVM
@@ -315,7 +337,7 @@ config INTEL_IDXD_PERFMON
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
- depends on PCI && X86_64
+ depends on PCI && X86_64 && !UML
select DMA_ENGINE
select DMA_ENGINE_RAID
select DCA
@@ -716,6 +738,8 @@ source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
+source "drivers/dma/ptdma/Kconfig"
+
source "drivers/dma/qcom/Kconfig"
source "drivers/dma/dw/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index aa69094e3547..616d926cf2a5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
@@ -41,7 +42,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
-obj-$(CONFIG_INTEL_IDXD) += idxd/
+obj-y += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 235f1396f968..5906eae26e2a 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -70,10 +70,22 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
si = (const struct acpi_csrt_shared_info *)&grp[1];
- /* Match device by MMIO and IRQ */
+ /* Match device by MMIO */
if (si->mmio_base_low != lower_32_bits(mem) ||
- si->mmio_base_high != upper_32_bits(mem) ||
- si->gsi_interrupt != irq)
+ si->mmio_base_high != upper_32_bits(mem))
+ return 0;
+
+ /*
+ * acpi_gsi_to_irq() can't be used because some platforms do not save
+ * registered IRQs in the MP table. Instead we just try to register
+ * the GSI, which is the core part of the above mentioned function.
+ */
+ ret = acpi_register_gsi(NULL, si->gsi_interrupt, si->interrupt_mode, si->interrupt_polarity);
+ if (ret < 0)
+ return 0;
+
+ /* Match device by Linux vIRQ */
+ if (ret != irq)
return 0;
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 0fe0676f8e1d..5a2c7573b692 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -691,10 +691,14 @@ static void msgdma_tasklet(struct tasklet_struct *t)
spin_lock_irqsave(&mdev->lock, flags);
- /* Read number of responses that are available */
- count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
- dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
- __func__, __LINE__, count);
+ if (mdev->resp) {
+ /* Read number of responses that are available */
+ count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
+ dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
+ __func__, __LINE__, count);
+ } else {
+ count = 1;
+ }
while (count--) {
/*
@@ -703,8 +707,12 @@ static void msgdma_tasklet(struct tasklet_struct *t)
* have any real values, like transferred bytes or error
* bits. So we need to just drop these values.
*/
- size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
- status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
+ if (mdev->resp) {
+ size = ioread32(mdev->resp +
+ MSGDMA_RESP_BYTES_TRANSFERRED);
+ status = ioread32(mdev->resp +
+ MSGDMA_RESP_STATUS);
+ }
msgdma_complete_descriptor(mdev);
msgdma_chan_desc_cleanup(mdev);
@@ -757,14 +765,21 @@ static void msgdma_dev_remove(struct msgdma_device *mdev)
}
static int request_and_map(struct platform_device *pdev, const char *name,
- struct resource **res, void __iomem **ptr)
+ struct resource **res, void __iomem **ptr,
+ bool optional)
{
struct resource *region;
struct device *device = &pdev->dev;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
- dev_err(device, "resource %s not defined\n", name);
+ if (optional) {
+ *ptr = NULL;
+ dev_info(device, "optional resource %s not defined\n",
+ name);
+ return 0;
+ }
+ dev_err(device, "mandatory resource %s not defined\n", name);
return -ENODEV;
}
@@ -805,17 +820,17 @@ static int msgdma_probe(struct platform_device *pdev)
mdev->dev = &pdev->dev;
/* Map CSR space */
- ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
+ ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false);
if (ret)
return ret;
/* Map (extended) descriptor space */
- ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
+ ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false);
if (ret)
return ret;
/* Map response space */
- ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
+ ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true);
if (ret)
return ret;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 64a52bf4d737..ab78e0f6afd7 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2240,10 +2240,16 @@ static struct platform_driver at_xdmac_driver = {
static int __init at_xdmac_init(void)
{
- return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
+ return platform_driver_register(&at_xdmac_driver);
}
subsys_initcall(at_xdmac_init);
+static void __exit at_xdmac_exit(void)
+{
+ platform_driver_unregister(&at_xdmac_driver);
+}
+module_exit(at_xdmac_exit);
+
MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index d9e4ac3edb4e..35993ab92154 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -363,12 +363,16 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
<< CH_CFG_H_TT_FC_POS;
+ if (chan->chip->apb_regs)
+ reg |= (chan->id << CH_CFG_H_DST_PER_POS);
break;
case DMA_DEV_TO_MEM:
reg |= (chan->config.device_fc ?
DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
<< CH_CFG_H_TT_FC_POS;
+ if (chan->chip->apb_regs)
+ reg |= (chan->id << CH_CFG_H_SRC_PER_POS);
break;
default:
break;
@@ -470,18 +474,13 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
pm_runtime_put(chan->chip->dev);
}
-static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
- u32 handshake_num, bool set)
+static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
- unsigned long start = 0;
- unsigned long reg_value;
- unsigned long reg_mask;
- unsigned long reg_set;
- unsigned long mask;
- unsigned long val;
+ struct axi_dma_chip *chip = chan->chip;
+ unsigned long reg_value, val;
if (!chip->apb_regs) {
- dev_dbg(chip->dev, "apb_regs not initialized\n");
+ dev_err(chip->dev, "apb_regs not initialized\n");
return;
}
@@ -490,26 +489,22 @@ static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
* Lock the DMA channel by assigning a handshake number to the channel.
* Unlock the DMA channel by assigning 0x3F to the channel.
*/
- if (set) {
- reg_set = UNUSED_CHANNEL;
- val = handshake_num;
- } else {
- reg_set = handshake_num;
+ if (set)
+ val = chan->hw_handshake_num;
+ else
val = UNUSED_CHANNEL;
- }
reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
- for_each_set_clump8(start, reg_mask, &reg_value, 64) {
- if (reg_mask == reg_set) {
- mask = GENMASK_ULL(start + 7, start);
- reg_value &= ~mask;
- reg_value |= rol64(val, start);
- lo_hi_writeq(reg_value,
- chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
- break;
- }
- }
+	/* The channel is already allocated; set the handshake by channel ID. */
+	/* One 64-bit write covers all 8 channels (8 bits each). */
+
+ reg_value &= ~(DMA_APB_HS_SEL_MASK <<
+ (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
+ reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
+ lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}
/*
@@ -742,7 +737,7 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
llp = hw_desc->llp;
} while (total_segments);
- dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+ dw_axi_dma_set_hw_channel(chan, true);
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
@@ -822,7 +817,7 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
llp = hw_desc->llp;
} while (num_sgs);
- dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+ dw_axi_dma_set_hw_channel(chan, true);
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
@@ -1098,8 +1093,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
"%s failed to stop\n", axi_chan_name(chan));
if (chan->direction != DMA_MEM_TO_MEM)
- dw_axi_dma_set_hw_channel(chan->chip,
- chan->hw_handshake_num, false);
+ dw_axi_dma_set_hw_channel(chan, false);
if (chan->direction == DMA_MEM_TO_DEV)
dw_axi_dma_set_byte_halfword(chan, false);
@@ -1296,7 +1290,7 @@ static int parse_device_properties(struct axi_dma_chip *chip)
return -EINVAL;
chip->dw->hdata->restrict_axi_burst_len = true;
- chip->dw->hdata->axi_rw_burst_len = tmp - 1;
+ chip->dw->hdata->axi_rw_burst_len = tmp;
}
return 0;
@@ -1365,7 +1359,6 @@ static int dw_probe(struct platform_device *pdev)
if (ret)
return ret;
-
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < hdata->nr_channels; i++) {
struct axi_dma_chan *chan = &dw->chan[i];
@@ -1386,6 +1379,7 @@ static int dw_probe(struct platform_device *pdev)
/* DMA capabilities */
dw->dma.chancnt = hdata->nr_channels;
+ dw->dma.max_burst = hdata->axi_rw_burst_len;
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.directions = BIT(DMA_MEM_TO_MEM);
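
The dw-axi-dmac rewrite drops the for_each_set_clump8() search because each channel now owns a fixed 8-bit field in DMAC_APB_HW_HS_SEL_0 (channel 0 in bits 7:0, channel 1 in bits 15:8, and so on), so one masked read-modify-write suffices. A self-contained sketch of that arithmetic, assuming the 8-bits-per-channel layout from dw-axi-dmac.h:

	#include <stdint.h>
	#include <stdio.h>

	#define HS_SEL_MASK	0xFFULL	/* DMA_APB_HS_SEL_MASK */
	#define HS_SEL_BITS	8	/* DMA_APB_HS_SEL_BIT_SIZE */

	static uint64_t hs_sel_update(uint64_t reg, unsigned int chan_id, uint8_t val)
	{
		reg &= ~(HS_SEL_MASK << (chan_id * HS_SEL_BITS));	/* clear the field */
		reg |= (uint64_t)val << (chan_id * HS_SEL_BITS);	/* program it */
		return reg;
	}

	int main(void)
	{
		uint64_t reg = ~0ULL;	/* arbitrary starting register value */

		reg = hs_sel_update(reg, 2, 5);		/* lock channel 2 to handshake 5 */
		reg = hs_sel_update(reg, 2, 0x3F);	/* unlock it (UNUSED_CHANNEL) */
		printf("%016llx\n", (unsigned long long)reg);
		return 0;
	}
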
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index b69897887c76..380005afde16 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -184,6 +184,8 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
#define DMAC_APB_HALFWORD_WR_CH_EN 0x020 /* DMAC Halfword write enables */
#define UNUSED_CHANNEL 0x3F /* Set unused DMA channel to 0x3F */
+#define DMA_APB_HS_SEL_BIT_SIZE 0x08 /* HW handshake bits per channel */
+#define DMA_APB_HS_SEL_MASK 0xFF /* HW handshake select masks */
#define MAX_BLOCK_SIZE 0x1000 /* 1024 blocks * 4 bytes data width */
/* DMAC_CFG */
@@ -256,6 +258,8 @@ enum {
/* CH_CFG_H */
#define CH_CFG_H_PRIORITY_POS 17
+#define CH_CFG_H_DST_PER_POS 12
+#define CH_CFG_H_SRC_PER_POS 7
#define CH_CFG_H_HS_SEL_DST_POS 4
#define CH_CFG_H_HS_SEL_SRC_POS 3
enum {
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c
index 3ce44de25d33..58f4078d83fe 100644
--- a/drivers/dma/dw/idma32.c
+++ b/drivers/dma/dw/idma32.c
@@ -1,15 +1,144 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2013,2018 Intel Corporation
+// Copyright (C) 2013,2018,2020-2021 Intel Corporation
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "internal.h"
-static void idma32_initialize_chan(struct dw_dma_chan *dwc)
+#define DMA_CTL_CH(x) (0x1000 + (x) * 4)
+#define DMA_SRC_ADDR_FILLIN(x) (0x1100 + (x) * 4)
+#define DMA_DST_ADDR_FILLIN(x) (0x1200 + (x) * 4)
+#define DMA_XBAR_SEL(x) (0x1300 + (x) * 4)
+#define DMA_REGACCESS_CHID_CFG (0x1400)
+
+#define CTL_CH_TRANSFER_MODE_MASK GENMASK(1, 0)
+#define CTL_CH_TRANSFER_MODE_S2S 0
+#define CTL_CH_TRANSFER_MODE_S2D 1
+#define CTL_CH_TRANSFER_MODE_D2S 2
+#define CTL_CH_TRANSFER_MODE_D2D 3
+#define CTL_CH_RD_RS_MASK GENMASK(4, 3)
+#define CTL_CH_WR_RS_MASK GENMASK(6, 5)
+#define CTL_CH_RD_NON_SNOOP_BIT BIT(8)
+#define CTL_CH_WR_NON_SNOOP_BIT BIT(9)
+
+#define XBAR_SEL_DEVID_MASK GENMASK(15, 0)
+#define XBAR_SEL_RX_TX_BIT BIT(16)
+#define XBAR_SEL_RX_TX_SHIFT 16
+
+#define REGACCESS_CHID_MASK GENMASK(2, 0)
+
+static unsigned int idma32_get_slave_devfn(struct dw_dma_chan *dwc)
+{
+ struct device *slave = dwc->chan.slave;
+
+ if (!slave || !dev_is_pci(slave))
+ return 0;
+
+ return to_pci_dev(slave)->devfn;
+}
+
+static void idma32_initialize_chan_xbar(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ void __iomem *misc = __dw_regs(dw);
+ u32 cfghi = 0, cfglo = 0;
+ u8 dst_id, src_id;
+ u32 value;
+
+ /* DMA Channel ID Configuration register must be programmed first */
+ value = readl(misc + DMA_REGACCESS_CHID_CFG);
+
+ value &= ~REGACCESS_CHID_MASK;
+ value |= dwc->chan.chan_id;
+
+ writel(value, misc + DMA_REGACCESS_CHID_CFG);
+
+ /* Configure channel attributes */
+ value = readl(misc + DMA_CTL_CH(dwc->chan.chan_id));
+
+ value &= ~(CTL_CH_RD_NON_SNOOP_BIT | CTL_CH_WR_NON_SNOOP_BIT);
+ value &= ~(CTL_CH_RD_RS_MASK | CTL_CH_WR_RS_MASK);
+ value &= ~CTL_CH_TRANSFER_MODE_MASK;
+
+ switch (dwc->direction) {
+ case DMA_MEM_TO_DEV:
+ value |= CTL_CH_TRANSFER_MODE_D2S;
+ value |= CTL_CH_WR_NON_SNOOP_BIT;
+ break;
+ case DMA_DEV_TO_MEM:
+ value |= CTL_CH_TRANSFER_MODE_S2D;
+ value |= CTL_CH_RD_NON_SNOOP_BIT;
+ break;
+ default:
+ /*
+ * Memory-to-Memory and Device-to-Device are ignored for now.
+ *
+ * For Memory-to-Memory transfers we would need to set mode
+ * and disable snooping on both sides.
+ */
+ return;
+ }
+
+ writel(value, misc + DMA_CTL_CH(dwc->chan.chan_id));
+
+ /* Configure crossbar selection */
+ value = readl(misc + DMA_XBAR_SEL(dwc->chan.chan_id));
+
+ /* DEVFN selection */
+ value &= ~XBAR_SEL_DEVID_MASK;
+ value |= idma32_get_slave_devfn(dwc);
+
+ switch (dwc->direction) {
+ case DMA_MEM_TO_DEV:
+ value |= XBAR_SEL_RX_TX_BIT;
+ break;
+ case DMA_DEV_TO_MEM:
+ value &= ~XBAR_SEL_RX_TX_BIT;
+ break;
+ default:
+ /* Memory-to-Memory and Device-to-Device are ignored for now */
+ return;
+ }
+
+ writel(value, misc + DMA_XBAR_SEL(dwc->chan.chan_id));
+
+ /* Configure DMA channel low and high registers */
+ switch (dwc->direction) {
+ case DMA_MEM_TO_DEV:
+ dst_id = dwc->chan.chan_id;
+ src_id = dwc->dws.src_id;
+ break;
+ case DMA_DEV_TO_MEM:
+ dst_id = dwc->dws.dst_id;
+ src_id = dwc->chan.chan_id;
+ break;
+ default:
+ /* Memory-to-Memory and Device-to-Device are ignored for now */
+ return;
+ }
+
+ /* Set default burst alignment */
+ cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
+
+ /* Low 4 bits of the request lines */
+ cfghi |= IDMA32C_CFGH_DST_PER(dst_id & 0xf);
+ cfghi |= IDMA32C_CFGH_SRC_PER(src_id & 0xf);
+
+ /* Request line extension (2 bits) */
+ cfghi |= IDMA32C_CFGH_DST_PER_EXT(dst_id >> 4 & 0x3);
+ cfghi |= IDMA32C_CFGH_SRC_PER_EXT(src_id >> 4 & 0x3);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void idma32_initialize_chan_generic(struct dw_dma_chan *dwc)
{
u32 cfghi = 0;
u32 cfglo = 0;
@@ -134,7 +263,10 @@ int idma32_dma_probe(struct dw_dma_chip *chip)
return -ENOMEM;
/* Channel operations */
- dw->initialize_chan = idma32_initialize_chan;
+ if (chip->pdata->quirks & DW_DMA_QUIRK_XBAR_PRESENT)
+ dw->initialize_chan = idma32_initialize_chan_xbar;
+ else
+ dw->initialize_chan = idma32_initialize_chan_generic;
dw->suspend_chan = idma32_suspend_chan;
dw->resume_chan = idma32_resume_chan;
dw->prepare_ctllo = idma32_prepare_ctllo;
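
In idma32_initialize_chan_xbar() above, each request line is split into a 4-bit base for the IDMA32C_CFGH_DST/SRC_PER fields and a 2-bit extension for the *_PER_EXT fields before CFG_HI is written. A self-contained sketch of the split:

	#include <stdint.h>
	#include <stdio.h>

	static void split_req_line(uint8_t id, uint8_t *per, uint8_t *ext)
	{
		*per = id & 0xf;	/* low 4 bits of the request line */
		*ext = (id >> 4) & 0x3;	/* 2-bit request line extension */
	}

	int main(void)
	{
		uint8_t per, ext;

		split_req_line(0x2b, &per, &ext);	/* request line 43 */
		printf("per=%u ext=%u\n", per, ext);	/* per=11 ext=2 */
		return 0;
	}
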
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 2e1c52eefdeb..563ce73488db 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -74,4 +74,20 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
.remove = idma32_dma_remove,
};
+static const struct dw_dma_platform_data xbar_pdata = {
+ .nr_channels = 8,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+ .block_size = 131071,
+ .nr_masters = 1,
+ .data_width = {4},
+ .quirks = DW_DMA_QUIRK_XBAR_PRESENT,
+};
+
+static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
+ .pdata = &xbar_pdata,
+ .probe = idma32_dma_probe,
+ .remove = idma32_dma_remove,
+};
+
#endif /* _DMA_DW_INTERNAL_H */
diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
index c1cf7675b9d1..523ca806837c 100644
--- a/drivers/dma/dw/of.c
+++ b/drivers/dma/dw/of.c
@@ -50,15 +50,10 @@ struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
- u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
+ u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
u32 nr_masters;
u32 nr_channels;
- if (!np) {
- dev_err(&pdev->dev, "Missing DT data\n");
- return NULL;
- }
-
if (of_property_read_u32(np, "dma-masters", &nr_masters))
return NULL;
if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
@@ -76,41 +71,29 @@ struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
- if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
- pdata->chan_allocation_order = (unsigned char)tmp;
+ of_property_read_u32(np, "chan_allocation_order", &pdata->chan_allocation_order);
+ of_property_read_u32(np, "chan_priority", &pdata->chan_priority);
- if (!of_property_read_u32(np, "chan_priority", &tmp))
- pdata->chan_priority = tmp;
+ of_property_read_u32(np, "block_size", &pdata->block_size);
- if (!of_property_read_u32(np, "block_size", &tmp))
- pdata->block_size = tmp;
-
- if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
- for (tmp = 0; tmp < nr_masters; tmp++)
- pdata->data_width[tmp] = arr[tmp];
- } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
+ /* Try deprecated property first */
+ if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
- if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = mb[tmp];
- } else {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = 1;
- }
+	/* If both "data_width" and "data-width" are provided, the latter wins */
+ of_property_read_u32_array(np, "data-width", pdata->data_width, nr_masters);
- if (of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst,
- nr_channels)) {
- memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels);
- }
+ memset32(pdata->multi_block, 1, nr_channels);
+ of_property_read_u32_array(np, "multi-block", pdata->multi_block, nr_channels);
- if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
- if (tmp > CHAN_PROTCTL_MASK)
- return NULL;
- pdata->protctl = tmp;
- }
+ memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels);
+ of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst, nr_channels);
+
+ of_property_read_u32(np, "snps,dma-protection-control", &pdata->protctl);
+ if (pdata->protctl > CHAN_PROTCTL_MASK)
+ return NULL;
return pdata;
}
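
The dw/of.c simplification leans on a documented property of the OF accessors: of_property_read_u32() and of_property_read_u32_array() leave the output buffer untouched when the property is absent, so a default can be stored first and an optional property layered on top. A sketch of the idiom with the names used above:

	/* Pre-fill the default, then let the optional property override it. */
	memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels);
	of_property_read_u32_array(np, "snps,max-burst-len",
				   pdata->max_burst, nr_channels);
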
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 1142aa6f8c4a..26a3f926da02 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -120,9 +120,9 @@ static const struct pci_device_id dw_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
- { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_chip_pdata },
- { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_chip_pdata },
- { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&xbar_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&xbar_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&xbar_chip_pdata },
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_dma_chip_pdata },
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 0585d749d935..246118955877 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -149,9 +149,9 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
- { "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
- { "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
- { "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },
+ { "80864BB4", (kernel_ulong_t)&xbar_chip_pdata },
+ { "80864BB5", (kernel_ulong_t)&xbar_chip_pdata },
+ { "80864BB6", (kernel_ulong_t)&xbar_chip_pdata },
{ }
};
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 01027779beb8..98f9ee70362e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -897,7 +897,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
if (data && data->name)
name = data->name;
- ret = clk_enable(edmac->clk);
+ ret = clk_prepare_enable(edmac->clk);
if (ret)
return ret;
@@ -936,7 +936,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
fail_free_irq:
free_irq(edmac->irq, edmac);
fail_clk_disable:
- clk_disable(edmac->clk);
+ clk_disable_unprepare(edmac->clk);
return ret;
}
@@ -969,7 +969,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
list_for_each_entry_safe(desc, d, &list, node)
kfree(desc);
- clk_disable(edmac->clk);
+ clk_disable_unprepare(edmac->clk);
free_irq(edmac->irq, edmac);
}
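
The ep93xx fix matters because clk_enable() alone is only valid on a clock that was already prepared: the common clock framework splits the operation since clk_prepare() may sleep while clk_enable() must work in atomic context. A sketch of what the combined helpers expand to, per <linux/clk.h>:

	static inline int my_clk_prepare_enable(struct clk *clk)
	{
		int ret = clk_prepare(clk);	/* may sleep (e.g. PLL lock) */

		if (ret)
			return ret;
		ret = clk_enable(clk);		/* usable from atomic context */
		if (ret)
			clk_unprepare(clk);
		return ret;
	}
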
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 4ae057922ef1..8dd40d00a672 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -291,9 +291,8 @@ static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
if (err) {
- list_del(&dpaa2_comp->list);
- list_add_tail(&dpaa2_comp->list,
- &dpaa2_chan->comp_free);
+ list_move_tail(&dpaa2_comp->list,
+ &dpaa2_chan->comp_free);
}
}
err_enqueue:
@@ -626,8 +625,7 @@ static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
dpaa2_comp = to_fsl_qdma_comp(vdesc);
qchan = dpaa2_comp->qchan;
spin_lock_irqsave(&qchan->queue_lock, flags);
- list_del(&dpaa2_comp->list);
- list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
+ list_move_tail(&dpaa2_comp->list, &qchan->comp_free);
spin_unlock_irqrestore(&qchan->queue_lock, flags);
}
@@ -703,7 +701,7 @@ static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
/* DPDMAI enable */
err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
if (err) {
- dev_err(dev, "dpdmai_enable() faile\n");
+ dev_err(dev, "dpdmai_enable() failed\n");
goto err_enable;
}
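
list_move_tail() is the single-call form of the list_del() + list_add_tail() pair it replaces; <linux/list.h> implements it as exactly that sequence:

	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del_entry(list);		/* unlink from the current list */
		list_add_tail(list, head);	/* append to the new tail */
	}
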
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index a259ee010e9b..c855a0e4f9ff 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -133,11 +133,6 @@ static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
writel_relaxed(tmp, addr);
}
-static void hisi_dma_free_irq_vectors(void *data)
-{
- pci_free_irq_vectors(data);
-}
-
static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
bool pause)
{
@@ -544,6 +539,7 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hdma_dev);
pci_set_master(pdev);
+ /* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */
ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
PCI_IRQ_MSI);
if (ret < 0) {
@@ -551,10 +547,6 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
- if (ret)
- return ret;
-
dma_dev = &hdma_dev->dma_dev;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
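
The hisi_dma cleanup is safe because the device is enabled with pcim_enable_device(), and the PCI core's pcim_release() devres callback already frees IRQ vectors on driver detach; the explicit devm action was duplicating that. A sketch of the managed skeleton this relies on, with hypothetical names:

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		ret = pcim_enable_device(pdev);	/* device-managed enable */
		if (ret)
			return ret;

		/* No matching free needed: pcim_release() handles it on unbind. */
		ret = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);
		return ret < 0 ? ret : 0;
	}
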
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index 6d11558756f8..a1e9f2b3a37c 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,4 +1,12 @@
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+
obj-$(CONFIG_INTEL_IDXD) += idxd.o
idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
+
+obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+idxd_bus-y := bus.o
+
+obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
+idxd_compat-y := compat.o
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
new file mode 100644
index 000000000000..6f84621053c6
--- /dev/null
+++ b/drivers/dma/idxd/bus.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include "idxd.h"
+
+
+int __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *owner,
+ const char *mod_name)
+{
+ struct device_driver *drv = &idxd_drv->drv;
+
+ if (!idxd_drv->type) {
+ pr_debug("driver type not set (%ps)\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ drv->name = idxd_drv->name;
+ drv->bus = &dsa_bus_type;
+ drv->owner = owner;
+ drv->mod_name = mod_name;
+
+ return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(__idxd_driver_register);
+
+void idxd_driver_unregister(struct idxd_device_driver *idxd_drv)
+{
+ driver_unregister(&idxd_drv->drv);
+}
+EXPORT_SYMBOL_GPL(idxd_driver_unregister);
+
+static int idxd_config_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ struct idxd_device_driver *idxd_drv =
+ container_of(drv, struct idxd_device_driver, drv);
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+ int i = 0;
+
+ while (idxd_drv->type[i] != IDXD_DEV_NONE) {
+ if (idxd_dev->type == idxd_drv->type[i])
+ return 1;
+ i++;
+ }
+
+ return 0;
+}
+
+static int idxd_config_bus_probe(struct device *dev)
+{
+ struct idxd_device_driver *idxd_drv =
+ container_of(dev->driver, struct idxd_device_driver, drv);
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return idxd_drv->probe(idxd_dev);
+}
+
+static void idxd_config_bus_remove(struct device *dev)
+{
+ struct idxd_device_driver *idxd_drv =
+ container_of(dev->driver, struct idxd_device_driver, drv);
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ idxd_drv->remove(idxd_dev);
+}
+
+struct bus_type dsa_bus_type = {
+ .name = "dsa",
+ .match = idxd_config_bus_match,
+ .probe = idxd_config_bus_probe,
+ .remove = idxd_config_bus_remove,
+};
+EXPORT_SYMBOL_GPL(dsa_bus_type);
+
+static int __init dsa_bus_init(void)
+{
+ return bus_register(&dsa_bus_type);
+}
+module_init(dsa_bus_init);
+
+static void __exit dsa_bus_exit(void)
+{
+ bus_unregister(&dsa_bus_type);
+}
+module_exit(dsa_bus_exit);
+
+MODULE_DESCRIPTION("IDXD dsa_bus_type driver");
+MODULE_LICENSE("GPL v2");
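
Clients of the new bus embed a struct device_driver inside struct idxd_device_driver and list the device types they match, terminated by IDXD_DEV_NONE. A sketch of a minimal client, assuming the idxd.h helpers added later in this series; the "demo" names are hypothetical:

	static enum idxd_dev_type demo_types[] = { IDXD_DEV_WQ, IDXD_DEV_NONE };

	static int demo_probe(struct idxd_dev *idxd_dev) { return 0; }
	static void demo_remove(struct idxd_dev *idxd_dev) { }

	static struct idxd_device_driver demo_drv = {
		.name   = "demo",
		.type   = demo_types,
		.probe  = demo_probe,
		.remove = demo_remove,
	};
	module_idxd_driver(demo_drv);	/* registers on dsa_bus_type */
	MODULE_IMPORT_NS(IDXD);		/* exports live in the IDXD namespace */
	MODULE_LICENSE("GPL v2");
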
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index e9def577c697..b9b2b4a4124e 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -41,7 +41,7 @@ struct idxd_user_context {
static void idxd_cdev_dev_release(struct device *dev)
{
- struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
+ struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
struct idxd_cdev_context *cdev_ctx;
struct idxd_wq *wq = idxd_cdev->wq;
@@ -218,14 +218,13 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_user_context *ctx = filp->private_data;
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
- unsigned long flags;
__poll_t out = 0;
poll_wait(filp, &wq->err_queue, wait);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)
out = EPOLLIN | EPOLLRDNORM;
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
return out;
}
@@ -256,9 +255,10 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
if (!idxd_cdev)
return -ENOMEM;
+ idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV;
idxd_cdev->wq = wq;
cdev = &idxd_cdev->cdev;
- dev = &idxd_cdev->dev;
+ dev = cdev_dev(idxd_cdev);
cdev_ctx = &ictx[wq->idxd->data->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
@@ -268,7 +268,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
idxd_cdev->minor = minor;
device_initialize(dev);
- dev->parent = &wq->conf_dev;
+ dev->parent = wq_confdev(wq);
dev->bus = &dsa_bus_type;
dev->type = &idxd_cdev_device_type;
dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
@@ -299,10 +299,67 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
idxd_cdev = wq->idxd_cdev;
wq->idxd_cdev = NULL;
- cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
- put_device(&idxd_cdev->dev);
+ cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
+ put_device(cdev_dev(idxd_cdev));
}
+static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -ENXIO;
+
+ mutex_lock(&wq->wq_lock);
+ wq->type = IDXD_WQT_USER;
+ rc = __drv_enable_wq(wq);
+ if (rc < 0)
+ goto err;
+
+ rc = idxd_wq_add_cdev(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_CDEV_ERR;
+ goto err_cdev;
+ }
+
+ idxd->cmd_status = 0;
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+
+err_cdev:
+ __drv_disable_wq(wq);
+err:
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+}
+
+static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+
+ mutex_lock(&wq->wq_lock);
+ idxd_wq_del_cdev(wq);
+ __drv_disable_wq(wq);
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_WQ,
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver idxd_user_drv = {
+ .probe = idxd_user_drv_probe,
+ .remove = idxd_user_drv_remove,
+ .name = "user",
+ .type = dev_types,
+};
+EXPORT_SYMBOL_GPL(idxd_user_drv);
+
int idxd_cdev_register(void)
{
int rc, i;
diff --git a/drivers/dma/idxd/compat.c b/drivers/dma/idxd/compat.c
new file mode 100644
index 000000000000..3df21615f888
--- /dev/null
+++ b/drivers/dma/idxd/compat.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include "idxd.h"
+
+extern int device_driver_attach(struct device_driver *drv, struct device *dev);
+extern void device_driver_detach(struct device *dev);
+
+#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+ struct driver_attribute driver_attr_##_name = \
+ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
+
+static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t count)
+{
+ struct bus_type *bus = drv->bus;
+ struct device *dev;
+ int rc = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev && dev->driver) {
+ device_driver_detach(dev);
+ rc = count;
+ }
+
+ return rc;
+}
+static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
+
+static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t count)
+{
+ struct bus_type *bus = drv->bus;
+ struct device *dev;
+ struct device_driver *alt_drv = NULL;
+ int rc = -ENODEV;
+ struct idxd_dev *idxd_dev;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (!dev || dev->driver || drv != &dsa_drv.drv)
+ return -ENODEV;
+
+ idxd_dev = confdev_to_idxd_dev(dev);
+ if (is_idxd_dev(idxd_dev)) {
+ alt_drv = driver_find("idxd", bus);
+ } else if (is_idxd_wq_dev(idxd_dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ if (is_idxd_wq_kernel(wq))
+ alt_drv = driver_find("dmaengine", bus);
+ else if (is_idxd_wq_user(wq))
+ alt_drv = driver_find("user", bus);
+ }
+ if (!alt_drv)
+ return -ENODEV;
+
+ rc = device_driver_attach(alt_drv, dev);
+ if (rc < 0)
+ return rc;
+
+ return count;
+}
+static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
+
+static struct attribute *dsa_drv_compat_attrs[] = {
+ &driver_attr_bind.attr,
+ &driver_attr_unbind.attr,
+ NULL,
+};
+
+static const struct attribute_group dsa_drv_compat_attr_group = {
+ .attrs = dsa_drv_compat_attrs,
+};
+
+static const struct attribute_group *dsa_drv_compat_groups[] = {
+ &dsa_drv_compat_attr_group,
+ NULL,
+};
+
+static int idxd_dsa_drv_probe(struct idxd_dev *idxd_dev)
+{
+ return -ENODEV;
+}
+
+static void idxd_dsa_drv_remove(struct idxd_dev *idxd_dev)
+{
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver dsa_drv = {
+ .name = "dsa",
+ .probe = idxd_dsa_drv_probe,
+ .remove = idxd_dsa_drv_remove,
+ .type = dev_types,
+ .drv = {
+ .suppress_bind_attrs = true,
+ .groups = dsa_drv_compat_groups,
+ },
+};
+
+module_idxd_driver(dsa_drv);
+MODULE_IMPORT_NS(IDXD);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 420b93fe5feb..83a5ff2ecf2a 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -15,6 +15,8 @@
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
u32 *status);
+static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
@@ -139,8 +141,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
if (wq->type != IDXD_WQT_KERNEL)
return 0;
- wq->num_descs = wq->size;
- num_descs = wq->size;
+ num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
+ wq->num_descs = num_descs;
rc = alloc_hw_descs(wq, num_descs);
if (rc < 0)
@@ -234,7 +236,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
return 0;
}
-int idxd_wq_disable(struct idxd_wq *wq)
+int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -255,6 +257,8 @@ int idxd_wq_disable(struct idxd_wq *wq)
return -ENXIO;
}
+ if (reset_config)
+ idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
dev_dbg(dev, "WQ %d disabled\n", wq->id);
return 0;
@@ -289,6 +293,7 @@ void idxd_wq_reset(struct idxd_wq *wq)
operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+ idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
@@ -315,6 +320,7 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
devm_iounmap(dev, wq->portal);
wq->portal = NULL;
+ wq->portal_offset = 0;
}
void idxd_wqs_unmap_portal(struct idxd_device *idxd)
@@ -335,19 +341,18 @@ int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
int rc;
union wqcfg wqcfg;
unsigned int offset;
- unsigned long flags;
- rc = idxd_wq_disable(wq);
+ rc = idxd_wq_disable(wq, false);
if (rc < 0)
return rc;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 1;
wqcfg.pasid = pasid;
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
rc = idxd_wq_enable(wq);
if (rc < 0)
@@ -362,19 +367,18 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
int rc;
union wqcfg wqcfg;
unsigned int offset;
- unsigned long flags;
- rc = idxd_wq_disable(wq);
+ rc = idxd_wq_disable(wq, false);
if (rc < 0)
return rc;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 0;
wqcfg.pasid = 0;
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
rc = idxd_wq_enable(wq);
if (rc < 0)
@@ -383,11 +387,11 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
return 0;
}
-void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- lockdep_assert_held(&idxd->dev_lock);
+ lockdep_assert_held(&wq->wq_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);
wq->type = IDXD_WQT_NONE;
wq->size = 0;
@@ -396,6 +400,7 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->priority = 0;
wq->ats_dis = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
}
@@ -455,7 +460,6 @@ int idxd_device_init_reset(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
union idxd_command_reg cmd;
- unsigned long flags;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -465,13 +469,13 @@ int idxd_device_init_reset(struct idxd_device *idxd)
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
- spin_lock_irqsave(&idxd->cmd_lock, flags);
+ spin_lock(&idxd->cmd_lock);
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
IDXD_CMDSTS_ACTIVE)
cpu_relax();
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
return 0;
}
@@ -480,7 +484,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
{
union idxd_command_reg cmd;
DECLARE_COMPLETION_ONSTACK(done);
- unsigned long flags;
+ u32 stat;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -494,7 +498,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
cmd.operand = operand;
cmd.int_req = 1;
- spin_lock_irqsave(&idxd->cmd_lock, flags);
+ spin_lock(&idxd->cmd_lock);
wait_event_lock_irq(idxd->cmd_waitq,
!test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
idxd->cmd_lock);
@@ -511,18 +515,18 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
* After command submitted, release lock and go to sleep until
* the command completes via interrupt.
*/
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
wait_for_completion(&done);
- spin_lock_irqsave(&idxd->cmd_lock, flags);
- if (status) {
- *status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
- idxd->cmd_status = *status & GENMASK(7, 0);
- }
+ stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ spin_lock(&idxd->cmd_lock);
+ if (status)
+ *status = stat;
+ idxd->cmd_status = stat & GENMASK(7, 0);
__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
/* Wake up other pending commands */
wake_up(&idxd->cmd_waitq);
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
}
int idxd_device_enable(struct idxd_device *idxd)
@@ -548,27 +552,10 @@ int idxd_device_enable(struct idxd_device *idxd)
return 0;
}
-void idxd_device_wqs_clear_state(struct idxd_device *idxd)
-{
- int i;
-
- lockdep_assert_held(&idxd->dev_lock);
-
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- if (wq->state == IDXD_WQ_ENABLED) {
- idxd_wq_disable_cleanup(wq);
- wq->state = IDXD_WQ_DISABLED;
- }
- }
-}
-
int idxd_device_disable(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
u32 status;
- unsigned long flags;
if (!idxd_is_enabled(idxd)) {
dev_dbg(dev, "Device is not enabled\n");
@@ -584,22 +571,20 @@ int idxd_device_disable(struct idxd_device *idxd)
return -ENXIO;
}
- spin_lock_irqsave(&idxd->dev_lock, flags);
- idxd_device_wqs_clear_state(idxd);
- idxd->state = IDXD_DEV_CONF_READY;
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
+ idxd_device_clear_state(idxd);
+ idxd->state = IDXD_DEV_DISABLED;
+ spin_unlock(&idxd->dev_lock);
return 0;
}
void idxd_device_reset(struct idxd_device *idxd)
{
- unsigned long flags;
-
idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
- spin_lock_irqsave(&idxd->dev_lock, flags);
- idxd_device_wqs_clear_state(idxd);
- idxd->state = IDXD_DEV_CONF_READY;
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
+ idxd_device_clear_state(idxd);
+ idxd->state = IDXD_DEV_DISABLED;
+ spin_unlock(&idxd->dev_lock);
}
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
@@ -649,7 +634,6 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
struct device *dev = &idxd->pdev->dev;
u32 operand, status;
union idxd_command_reg cmd;
- unsigned long flags;
if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
return -EOPNOTSUPP;
@@ -667,13 +651,13 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);
- spin_lock_irqsave(&idxd->cmd_lock, flags);
+ spin_lock(&idxd->cmd_lock);
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
cpu_relax();
status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
dev_dbg(dev, "release int handle failed: %#x\n", status);
@@ -685,6 +669,59 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
}
/* Device configuration bits */
+static void idxd_engines_clear_state(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ engine->group = NULL;
+ }
+}
+
+static void idxd_groups_clear_state(struct idxd_device *idxd)
+{
+ struct idxd_group *group;
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ memset(&group->grpcfg, 0, sizeof(group->grpcfg));
+ group->num_engines = 0;
+ group->num_wqs = 0;
+ group->use_token_limit = false;
+ group->tokens_allowed = 0;
+ group->tokens_reserved = 0;
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
+}
+
+static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ idxd_wq_disable_cleanup(wq);
+ wq->state = IDXD_WQ_DISABLED;
+ }
+ }
+}
+
+void idxd_device_clear_state(struct idxd_device *idxd)
+{
+ idxd_groups_clear_state(idxd);
+ idxd_engines_clear_state(idxd);
+ idxd_device_wqs_clear_state(idxd);
+}
+
void idxd_msix_perm_setup(struct idxd_device *idxd)
{
union msix_perm mperm;
@@ -773,6 +810,15 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
return 0;
}
+static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+
+ if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
+ return true;
+ return false;
+}
+
static int idxd_wq_config_write(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
@@ -796,6 +842,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->wq_size = wq->size;
if (wq->size == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
dev_warn(dev, "Incorrect work queue size: 0\n");
return -EINVAL;
}
@@ -804,7 +851,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->wq_thresh = wq->threshold;
/* byte 8-11 */
- wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
if (wq_dedicated(wq))
wq->wqcfg->mode = 1;
@@ -814,6 +860,25 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->pasid = idxd->pasid;
}
+ /*
+	 * The priv bit is set based on the WQ type: priv = 1 for a kernel WQ
+	 * to indicate privileged access. This setting only matters for a
+	 * dedicated WQ. According to the DSA spec:
+	 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
+	 * Privileged Mode Enable field of the PCI Express PASID capability
+	 * is 0, this field must be 0.
+	 *
+	 * A dedicated kernel WQ that cannot satisfy the PASID requirement
+	 * therefore has its configuration rejected.
+ */
+ wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
+ !idxd_device_pasid_priv_enabled(idxd) &&
+ wq->type == IDXD_WQT_KERNEL) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
+ return -EOPNOTSUPP;
+ }
+
wq->wqcfg->priority = wq->priority;
if (idxd->hw.gen_cap.block_on_fault &&
@@ -931,6 +996,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
continue;
if (wq_shared(wq) && !device_swq_supported(idxd)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
dev_warn(dev, "No shared wq support but configured.\n");
return -EINVAL;
}
@@ -939,8 +1005,10 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
configured++;
}
- if (configured == 0)
+ if (configured == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
return -EINVAL;
+ }
return 0;
}
@@ -1086,3 +1154,203 @@ int idxd_device_load_config(struct idxd_device *idxd)
return 0;
}
+
+int __drv_enable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int rc = -ENXIO;
+
+ lockdep_assert_held(&wq->wq_lock);
+
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
+ goto err;
+ }
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ dev_dbg(dev, "wq %d already enabled.\n", wq->id);
+ idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (!wq->group) {
+ dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
+ goto err;
+ }
+
+ if (strlen(wq->name) == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
+ dev_dbg(dev, "wq %d name not set.\n", wq->id);
+ goto err;
+ }
+
+ /* Shared WQ checks */
+ if (wq_shared(wq)) {
+ if (!device_swq_supported(idxd)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
+ dev_dbg(dev, "PASID not enabled and shared wq.\n");
+ goto err;
+ }
+ /*
+		 * A shared wq with the threshold set to 0 means the user
+		 * either never set a threshold or transitioned from a
+		 * dedicated wq without setting one. A value of 0 would
+		 * effectively disable the shared wq, so the driver does
+		 * not allow 0 to be stored via sysfs.
+ */
+ if (wq->threshold == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
+ dev_dbg(dev, "Shared wq and threshold 0.\n");
+ goto err;
+ }
+ }
+
+ rc = 0;
+ spin_lock(&idxd->dev_lock);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
+ spin_unlock(&idxd->dev_lock);
+ if (rc < 0) {
+ dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
+ goto err;
+ }
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
+ goto err;
+ }
+
+ rc = idxd_wq_map_portal(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
+ dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
+ goto err_map_portal;
+ }
+
+ wq->client_count = 0;
+ return 0;
+
+err_map_portal:
+ rc = idxd_wq_disable(wq, false);
+ if (rc < 0)
+ dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
+err:
+ return rc;
+}
+
+int drv_enable_wq(struct idxd_wq *wq)
+{
+ int rc;
+
+ mutex_lock(&wq->wq_lock);
+ rc = __drv_enable_wq(wq);
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+}
+
+void __drv_disable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ lockdep_assert_held(&wq->wq_lock);
+
+ if (idxd_wq_refcount(wq))
+		dev_warn(dev, "Clients have claims on wq %d: %d\n",
+ wq->id, idxd_wq_refcount(wq));
+
+ idxd_wq_unmap_portal(wq);
+
+ idxd_wq_drain(wq);
+ idxd_wq_reset(wq);
+
+ wq->client_count = 0;
+}
+
+void drv_disable_wq(struct idxd_wq *wq)
+{
+ mutex_lock(&wq->wq_lock);
+ __drv_disable_wq(wq);
+ mutex_unlock(&wq->wq_lock);
+}
+
+int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
+{
+ struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
+ int rc = 0;
+
+ /*
+	 * The device must be in the disabled state for idxd_drv to load. If it
+	 * is enabled, it was altered outside of the driver's control; if it is
+	 * halted, we don't want to proceed.
+ */
+ if (idxd->state != IDXD_DEV_DISABLED) {
+ idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
+ return -ENXIO;
+ }
+
+ /* Device configuration */
+ spin_lock(&idxd->dev_lock);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
+ spin_unlock(&idxd->dev_lock);
+ if (rc < 0)
+ return -ENXIO;
+
+ /* Start device */
+ rc = idxd_device_enable(idxd);
+ if (rc < 0)
+ return rc;
+
+ /* Setup DMA device without channels */
+ rc = idxd_register_dma_device(idxd);
+ if (rc < 0) {
+ idxd_device_disable(idxd);
+ idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
+ return rc;
+ }
+
+ idxd->cmd_status = 0;
+ return 0;
+}
+
+void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
+{
+ struct device *dev = &idxd_dev->conf_dev;
+ struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+ struct device *wq_dev = wq_confdev(wq);
+
+ if (wq->state == IDXD_WQ_DISABLED)
+ continue;
+ dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
+ device_release_driver(wq_dev);
+ }
+
+ idxd_unregister_dma_device(idxd);
+ idxd_device_disable(idxd);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ idxd_device_reset(idxd);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_DSA,
+ IDXD_DEV_IAX,
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver idxd_drv = {
+ .type = dev_types,
+ .probe = idxd_device_drv_probe,
+ .remove = idxd_device_drv_remove,
+ .name = "idxd",
+};
+EXPORT_SYMBOL_GPL(idxd_drv);
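
One detail worth calling out in device.c: WQ commands address their target through a split operand, a one-hot bit for (wq->id % 16) in the low word and the bank index (wq->id / 16) in the high word, as in the IDXD_CMD_RESET_WQ path above. A self-contained sketch:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t wq_cmd_operand(unsigned int wq_id)
	{
		return (1u << (wq_id % 16)) | ((wq_id / 16) << 16);
	}

	int main(void)
	{
		/* wq 3 -> bit 3, bank 0; wq 20 -> bit 4, bank 1 */
		printf("%#x %#x\n", wq_cmd_operand(3), wq_cmd_operand(20));
		return 0;
	}
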
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 77439b645044..e0f056c1d1f5 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -69,7 +69,11 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->src_addr = addr_f1;
hw->dst_addr = addr_f2;
hw->xfer_size = len;
- hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ /*
+	 * For a dedicated WQ, this field is ignored and HW will use the WQCFG.priv
+	 * field instead. This field should be set to 1 for kernel descriptors.
+ */
+ hw->priv = 1;
hw->completion_addr = compl;
}
@@ -149,10 +153,8 @@ static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
rc = idxd_submit_desc(wq, desc);
- if (rc < 0) {
- idxd_free_desc(wq, desc);
+ if (rc < 0)
return rc;
- }
return cookie;
}
@@ -245,7 +247,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
wq->idxd_chan = idxd_chan;
idxd_chan->wq = wq;
- get_device(&wq->conf_dev);
+ get_device(wq_confdev(wq));
return 0;
}
@@ -260,5 +262,87 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq)
list_del(&chan->device_node);
kfree(wq->idxd_chan);
wq->idxd_chan = NULL;
- put_device(&wq->conf_dev);
+ put_device(wq_confdev(wq));
}
+
+static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
+{
+ struct device *dev = &idxd_dev->conf_dev;
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -ENXIO;
+
+ mutex_lock(&wq->wq_lock);
+ wq->type = IDXD_WQT_KERNEL;
+ rc = __drv_enable_wq(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
+ rc = -ENXIO;
+ goto err;
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
+ dev_dbg(dev, "WQ resource alloc failed\n");
+ goto err_res_alloc;
+ }
+
+ rc = idxd_wq_init_percpu_ref(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
+ dev_dbg(dev, "percpu_ref setup failed\n");
+ goto err_ref;
+ }
+
+ rc = idxd_register_dma_channel(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
+ dev_dbg(dev, "Failed to register dma channel\n");
+ goto err_dma;
+ }
+
+ idxd->cmd_status = 0;
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+
+err_dma:
+ idxd_wq_quiesce(wq);
+err_ref:
+ idxd_wq_free_resources(wq);
+err_res_alloc:
+ __drv_disable_wq(wq);
+err:
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+}
+
+static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+
+ mutex_lock(&wq->wq_lock);
+ idxd_wq_quiesce(wq);
+ idxd_unregister_dma_channel(wq);
+ __drv_disable_wq(wq);
+ idxd_wq_free_resources(wq);
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_WQ,
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver idxd_dmaengine_drv = {
+ .probe = idxd_dmaengine_drv_probe,
+ .remove = idxd_dmaengine_drv_remove,
+ .name = "dmaengine",
+ .type = dev_types,
+};
+EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index fc708be7ad9a..bfcb03329f77 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -11,14 +11,32 @@
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
+#include <uapi/linux/idxd.h>
#include "registers.h"
#define IDXD_DRIVER_VERSION "1.00"
extern struct kmem_cache *idxd_desc_pool;
+extern bool tc_override;
-struct idxd_device;
struct idxd_wq;
+struct idxd_dev;
+
+enum idxd_dev_type {
+ IDXD_DEV_NONE = -1,
+ IDXD_DEV_DSA = 0,
+ IDXD_DEV_IAX,
+ IDXD_DEV_WQ,
+ IDXD_DEV_GROUP,
+ IDXD_DEV_ENGINE,
+ IDXD_DEV_CDEV,
+ IDXD_DEV_MAX_TYPE,
+};
+
+struct idxd_dev {
+ struct device conf_dev;
+ enum idxd_dev_type type;
+};
#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
@@ -34,9 +52,18 @@ enum idxd_type {
#define IDXD_PMU_EVENT_MAX 64
struct idxd_device_driver {
+ const char *name;
+ enum idxd_dev_type *type;
+ int (*probe)(struct idxd_dev *idxd_dev);
+ void (*remove)(struct idxd_dev *idxd_dev);
struct device_driver drv;
};
+extern struct idxd_device_driver dsa_drv;
+extern struct idxd_device_driver idxd_drv;
+extern struct idxd_device_driver idxd_dmaengine_drv;
+extern struct idxd_device_driver idxd_user_drv;
+
struct idxd_irq_entry {
struct idxd_device *idxd;
int id;
@@ -51,7 +78,7 @@ struct idxd_irq_entry {
};
struct idxd_group {
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
struct idxd_device *idxd;
struct grpcfg grpcfg;
int id;
@@ -110,7 +137,7 @@ enum idxd_wq_type {
struct idxd_cdev {
struct idxd_wq *wq;
struct cdev cdev;
- struct device dev;
+ struct idxd_dev idxd_dev;
int minor;
};
@@ -136,9 +163,10 @@ struct idxd_dma_chan {
struct idxd_wq {
void __iomem *portal;
+ u32 portal_offset;
struct percpu_ref wq_active;
struct completion wq_dead;
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
struct idxd_device *idxd;
@@ -153,7 +181,6 @@ struct idxd_wq {
enum idxd_wq_state state;
unsigned long flags;
union wqcfg *wqcfg;
- u32 vec_ptr; /* interrupt steering */
struct dsa_hw_desc **hw_descs;
int num_descs;
union {
@@ -174,7 +201,7 @@ struct idxd_wq {
};
struct idxd_engine {
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
int id;
struct idxd_group *group;
struct idxd_device *idxd;
@@ -194,7 +221,6 @@ struct idxd_hw {
enum idxd_device_state {
IDXD_DEV_HALTED = -1,
IDXD_DEV_DISABLED = 0,
- IDXD_DEV_CONF_READY,
IDXD_DEV_ENABLED,
};
@@ -218,7 +244,7 @@ struct idxd_driver_data {
};
struct idxd_device {
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
struct idxd_driver_data *data;
struct list_head list;
struct idxd_hw hw;
@@ -226,7 +252,7 @@ struct idxd_device {
unsigned long flags;
int id;
int major;
- u8 cmd_status;
+ u32 cmd_status;
struct pci_dev *pdev;
void __iomem *reg_base;
@@ -290,7 +316,6 @@ struct idxd_desc {
struct list_head list;
int id;
int cpu;
- unsigned int vector;
struct idxd_wq *wq;
};
@@ -302,11 +327,62 @@ enum idxd_completion_status {
IDXD_COMP_DESC_ABORT = 0xff,
};
-#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
-#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
+#define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev
+#define wq_confdev(wq) &wq->idxd_dev.conf_dev
+#define engine_confdev(engine) &engine->idxd_dev.conf_dev
+#define group_confdev(group) &group->idxd_dev.conf_dev
+#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev
+
+#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
+#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
+#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
+
+static inline struct idxd_device *confdev_to_idxd(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return idxd_dev_to_idxd(idxd_dev);
+}
+
+static inline struct idxd_wq *confdev_to_wq(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return idxd_dev_to_wq(idxd_dev);
+}
+
+static inline struct idxd_engine *confdev_to_engine(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_engine, idxd_dev);
+}
+
+static inline struct idxd_group *confdev_to_group(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_group, idxd_dev);
+}
+
+static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
+}
+
+static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
+{
+ if (type >= IDXD_DEV_MAX_TYPE) {
+ idev->type = IDXD_DEV_NONE;
+ return;
+ }
+
+ idev->type = type;
+}
extern struct bus_type dsa_bus_type;
-extern struct bus_type iax_bus_type;
extern bool support_enqcmd;
extern struct ida idxd_ida;
@@ -316,24 +392,24 @@ extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;
-static inline bool is_dsa_dev(struct device *dev)
+static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
- return dev->type == &dsa_device_type;
+ return idxd_dev->type == IDXD_DEV_DSA;
}
-static inline bool is_iax_dev(struct device *dev)
+static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
- return dev->type == &iax_device_type;
+ return idxd_dev->type == IDXD_DEV_IAX;
}
-static inline bool is_idxd_dev(struct device *dev)
+static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
- return is_dsa_dev(dev) || is_iax_dev(dev);
+ return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}
-static inline bool is_idxd_wq_dev(struct device *dev)
+static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
- return dev->type == &idxd_wq_device_type;
+ return idxd_dev->type == IDXD_DEV_WQ;
}
static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
@@ -343,11 +419,16 @@ static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
return false;
}
-static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
return wq->type == IDXD_WQT_USER;
}
+static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
+{
+ return wq->type == IDXD_WQT_KERNEL;
+}
+
static inline bool wq_dedicated(struct idxd_wq *wq)
{
return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
@@ -389,6 +470,24 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
+#define IDXD_PORTAL_MASK (PAGE_SIZE - 1)
+
+/*
+ * Even though this function can be accessed by multiple threads, it is safe to use.
+ * At worst the same address gets used more than once before the offset is
+ * incremented, and that only starts to matter at many millions of IOPS. The
+ * occasional reuse of an address is tolerable compared to using an atomic
+ * variable. This is safe on any system with atomic load/store for 32-bit
+ * integers. Given that this is an Intel iEP device, that is not a problem.
+ */
+static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
+{
+ int ofs = wq->portal_offset;
+
+ wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
+ return wq->portal + ofs;
+}
+
static inline void idxd_wq_get(struct idxd_wq *wq)
{
wq->client_count++;
@@ -404,6 +503,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
return wq->client_count;
};
+int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
+ struct module *module, const char *mod_name);
+#define idxd_driver_register(driver) \
+ __idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+
+void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);
+
+#define module_idxd_driver(__idxd_driver) \
+ module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
+
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
@@ -424,13 +533,20 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
+int idxd_register_idxd_drv(void);
+void idxd_unregister_idxd_drv(void);
+int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
+int drv_enable_wq(struct idxd_wq *wq);
+int __drv_enable_wq(struct idxd_wq *wq);
+void drv_disable_wq(struct idxd_wq *wq);
+void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
-void idxd_device_cleanup(struct idxd_device *idxd);
+void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
-void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
@@ -443,12 +559,11 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
-int idxd_wq_disable(struct idxd_wq *wq);
+int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
-void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
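
The portal helper added to idxd.h rotates submissions through the WQ portal page in descriptor-sized steps, wrapping at the page boundary. A self-contained sketch of the offset arithmetic, assuming 4 KiB pages and the 64-byte DSA descriptor:

	#include <stdint.h>
	#include <stdio.h>

	#define PORTAL_MASK	(4096 - 1)	/* IDXD_PORTAL_MASK */
	#define DESC_SIZE	64		/* sizeof(struct dsa_raw_desc) */

	int main(void)
	{
		uint32_t ofs = 0;
		int i;

		for (i = 0; i < 66; i++)	/* wraps to 0 at the 64th step */
			ofs = (ofs + DESC_SIZE) & PORTAL_MASK;
		printf("offset after 66 submissions: %u\n", ofs);	/* 128 */
		return 0;
	}
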
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index c0f4c0422f32..eb09bc591c31 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -26,11 +26,16 @@
MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
+MODULE_IMPORT_NS(IDXD);
static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
+bool tc_override;
+module_param(tc_override, bool, 0644);
+MODULE_PARM_DESC(tc_override, "Override traffic class defaults");
+
#define DRV_NAME "idxd"
bool support_enqcmd;
@@ -200,6 +205,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
struct idxd_wq *wq;
+ struct device *conf_dev;
int i, rc;
idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
@@ -214,15 +220,17 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
goto err;
}
+ idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+ conf_dev = wq_confdev(wq);
wq->id = i;
wq->idxd = idxd;
- device_initialize(&wq->conf_dev);
- wq->conf_dev.parent = &idxd->conf_dev;
- wq->conf_dev.bus = &dsa_bus_type;
- wq->conf_dev.type = &idxd_wq_device_type;
- rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ device_initialize(wq_confdev(wq));
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_wq_device_type;
+ rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
if (rc < 0) {
- put_device(&wq->conf_dev);
+ put_device(conf_dev);
goto err;
}
@@ -233,7 +241,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->max_batch_size = idxd->max_batch_size;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
- put_device(&wq->conf_dev);
+ put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
@@ -243,8 +251,11 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
err:
- while (--i >= 0)
- put_device(&idxd->wqs[i]->conf_dev);
+ while (--i >= 0) {
+ wq = idxd->wqs[i];
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ }
return rc;
}
@@ -252,6 +263,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
struct device *dev = &idxd->pdev->dev;
+ struct device *conf_dev;
int i, rc;
idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
@@ -266,15 +278,17 @@ static int idxd_setup_engines(struct idxd_device *idxd)
goto err;
}
+ idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
+ conf_dev = engine_confdev(engine);
engine->id = i;
engine->idxd = idxd;
- device_initialize(&engine->conf_dev);
- engine->conf_dev.parent = &idxd->conf_dev;
- engine->conf_dev.bus = &dsa_bus_type;
- engine->conf_dev.type = &idxd_engine_device_type;
- rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
+ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_engine_device_type;
+ rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
- put_device(&engine->conf_dev);
+ put_device(conf_dev);
goto err;
}
@@ -284,14 +298,18 @@ static int idxd_setup_engines(struct idxd_device *idxd)
return 0;
err:
- while (--i >= 0)
- put_device(&idxd->engines[i]->conf_dev);
+ while (--i >= 0) {
+ engine = idxd->engines[i];
+ conf_dev = engine_confdev(engine);
+ put_device(conf_dev);
+ }
return rc;
}
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
+ struct device *conf_dev;
struct idxd_group *group;
int i, rc;
@@ -307,28 +325,37 @@ static int idxd_setup_groups(struct idxd_device *idxd)
goto err;
}
+ idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
+ conf_dev = group_confdev(group);
group->id = i;
group->idxd = idxd;
- device_initialize(&group->conf_dev);
- group->conf_dev.parent = &idxd->conf_dev;
- group->conf_dev.bus = &dsa_bus_type;
- group->conf_dev.type = &idxd_group_device_type;
- rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
+ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_group_device_type;
+ rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
- put_device(&group->conf_dev);
+ put_device(conf_dev);
goto err;
}
idxd->groups[i] = group;
- group->tc_a = -1;
- group->tc_b = -1;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+ group->tc_a = 1;
+ group->tc_b = 1;
+ } else {
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
}
return 0;
err:
- while (--i >= 0)
- put_device(&idxd->groups[i]->conf_dev);
+ while (--i >= 0) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ }
return rc;
}
@@ -337,11 +364,11 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
int i;
for (i = 0; i < idxd->max_groups; i++)
- put_device(&idxd->groups[i]->conf_dev);
+ put_device(group_confdev(idxd->groups[i]));
for (i = 0; i < idxd->max_engines; i++)
- put_device(&idxd->engines[i]->conf_dev);
+ put_device(engine_confdev(idxd->engines[i]));
for (i = 0; i < idxd->max_wqs; i++)
- put_device(&idxd->wqs[i]->conf_dev);
+ put_device(wq_confdev(idxd->wqs[i]));
destroy_workqueue(idxd->wq);
}
@@ -381,13 +408,13 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_wkq_create:
for (i = 0; i < idxd->max_groups; i++)
- put_device(&idxd->groups[i]->conf_dev);
+ put_device(group_confdev(idxd->groups[i]));
err_group:
for (i = 0; i < idxd->max_engines; i++)
- put_device(&idxd->engines[i]->conf_dev);
+ put_device(engine_confdev(idxd->engines[i]));
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
- put_device(&idxd->wqs[i]->conf_dev);
+ put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
kfree(idxd->int_handles);
return rc;
@@ -469,6 +496,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
+ struct device *conf_dev;
struct idxd_device *idxd;
int rc;
@@ -476,19 +504,21 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
if (!idxd)
return NULL;
+ conf_dev = idxd_confdev(idxd);
idxd->pdev = pdev;
idxd->data = data;
+ idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
return NULL;
- device_initialize(&idxd->conf_dev);
- idxd->conf_dev.parent = dev;
- idxd->conf_dev.bus = &dsa_bus_type;
- idxd->conf_dev.type = idxd->data->dev_type;
- rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
+ device_initialize(conf_dev);
+ conf_dev->parent = dev;
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = idxd->data->dev_type;
+ rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
if (rc < 0) {
- put_device(&idxd->conf_dev);
+ put_device(conf_dev);
return NULL;
}
@@ -639,15 +669,9 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev_dbg(dev, "Set DMA masks\n");
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc)
- goto err;
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc)
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
goto err;
@@ -668,8 +692,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_dev_register;
}
- idxd->state = IDXD_DEV_CONF_READY;
-
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
@@ -680,7 +702,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
- put_device(&idxd->conf_dev);
+ put_device(idxd_confdev(idxd));
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
@@ -793,7 +815,7 @@ static void idxd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
- device_unregister(&idxd->conf_dev);
+ device_unregister(idxd_confdev(idxd));
}
static struct pci_driver idxd_pci_driver = {
@@ -824,13 +846,17 @@ static int __init idxd_init_module(void)
perfmon_init();
- err = idxd_register_bus_type();
+ err = idxd_driver_register(&idxd_drv);
if (err < 0)
- return err;
+ goto err_idxd_driver_register;
- err = idxd_register_driver();
+ err = idxd_driver_register(&idxd_dmaengine_drv);
if (err < 0)
- goto err_idxd_driver_register;
+ goto err_idxd_dmaengine_driver_register;
+
+ err = idxd_driver_register(&idxd_user_drv);
+ if (err < 0)
+ goto err_idxd_user_driver_register;
err = idxd_cdev_register();
if (err)
@@ -845,19 +871,23 @@ static int __init idxd_init_module(void)
err_pci_register:
idxd_cdev_remove();
err_cdev_register:
- idxd_unregister_driver();
+ idxd_driver_unregister(&idxd_user_drv);
+err_idxd_user_driver_register:
+ idxd_driver_unregister(&idxd_dmaengine_drv);
+err_idxd_dmaengine_driver_register:
+ idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
- idxd_unregister_bus_type();
return err;
}
module_init(idxd_init_module);
static void __exit idxd_exit_module(void)
{
- idxd_unregister_driver();
+ idxd_driver_unregister(&idxd_user_drv);
+ idxd_driver_unregister(&idxd_dmaengine_drv);
+ idxd_driver_unregister(&idxd_drv);
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
- idxd_unregister_bus_type();
perfmon_exit();
}
module_exit(idxd_exit_module);
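[Editorial note: idxd_init_module() now registers three sub-drivers and unwinds them in reverse order on failure. A generic sketch of that goto-unwind idiom follows; register_a/b/c and their unregister counterparts are hypothetical stand-ins for the idxd_driver_register()/idxd_driver_unregister() calls above.]

#include <stdio.h>

/* Hypothetical stand-ins for the real registration calls. */
static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; } /* force a failure to show unwind */
static void unregister_a(void) { puts("unregister a"); }
static void unregister_b(void) { puts("unregister b"); }

static int init(void)
{
	int err;

	err = register_a();
	if (err)
		goto err_a;
	err = register_b();
	if (err)
		goto err_b;
	err = register_c();
	if (err)
		goto err_c;
	return 0;

	/* Each label undoes only what succeeded before the failing step. */
err_c:
	unregister_b();
err_b:
	unregister_a();
err_a:
	return err;
}

int main(void) { return init() ? 1 : 0; }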
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 4e3a7198c0ca..ca88fa7a328e 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -22,13 +22,6 @@ struct idxd_fault {
struct idxd_device *idxd;
};
-static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data);
-static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data);
-
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -51,7 +44,7 @@ static void idxd_device_reinit(struct work_struct *work)
rc = idxd_wq_enable(wq);
if (rc < 0) {
dev_warn(dev, "Unable to re-enable wq %s\n",
- dev_name(&wq->conf_dev));
+ dev_name(wq_confdev(wq)));
}
}
}
@@ -59,47 +52,7 @@ static void idxd_device_reinit(struct work_struct *work)
return;
out:
- idxd_device_wqs_clear_state(idxd);
-}
-
-static void idxd_device_fault_work(struct work_struct *work)
-{
- struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
- struct idxd_irq_entry *ie;
- int i;
- int processed;
- int irqcnt = fault->idxd->num_wq_irqs + 1;
-
- for (i = 1; i < irqcnt; i++) {
- ie = &fault->idxd->irq_entries[i];
- irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
- &processed, fault->addr);
- if (processed)
- break;
-
- irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
- &processed, fault->addr);
- if (processed)
- break;
- }
-
- kfree(fault);
-}
-
-static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
- u64 fault_addr)
-{
- struct idxd_fault *fault;
-
- fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
- if (!fault)
- return -ENOMEM;
-
- fault->addr = fault_addr;
- fault->idxd = idxd;
- INIT_WORK(&fault->work, idxd_device_fault_work);
- queue_work(idxd->wq, &fault->work);
- return 0;
+ idxd_device_clear_state(idxd);
}
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
@@ -111,7 +64,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
bool err = false;
if (cause & IDXD_INTC_ERR) {
- spin_lock_bh(&idxd->dev_lock);
+ spin_lock(&idxd->dev_lock);
for (i = 0; i < 4; i++)
idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
IDXD_SWERR_OFFSET + i * sizeof(u64));
@@ -136,7 +89,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
}
}
- spin_unlock_bh(&idxd->dev_lock);
+ spin_unlock(&idxd->dev_lock);
val |= IDXD_INTC_ERR;
for (i = 0; i < 4; i++)
@@ -168,15 +121,6 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
if (!err)
return 0;
- /*
- * This case should rarely happen and typically is due to software
- * programming error by the driver.
- */
- if (idxd->sw_err.valid &&
- idxd->sw_err.desc_valid &&
- idxd->sw_err.fault_addr)
- idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
-
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
idxd->state = IDXD_DEV_HALTED;
@@ -189,15 +133,15 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
INIT_WORK(&idxd->work, idxd_device_reinit);
queue_work(idxd->wq, &idxd->work);
} else {
- spin_lock_bh(&idxd->dev_lock);
+ spin_lock(&idxd->dev_lock);
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
- idxd_device_wqs_clear_state(idxd);
+ idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
- spin_unlock_bh(&idxd->dev_lock);
+ spin_unlock(&idxd->dev_lock);
return -ENXIO;
}
}
@@ -228,127 +172,79 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
return IRQ_HANDLED;
}
-static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
-{
- /*
- * Completion address can be bad as well. Check fault address match for descriptor
- * and completion address.
- */
- if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
- struct idxd_device *idxd = desc->wq->idxd;
- struct device *dev = &idxd->pdev->dev;
-
- dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
- return true;
- }
-
- return false;
-}
-
-static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data)
+static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
struct idxd_desc *desc, *t;
struct llist_node *head;
- int queued = 0;
- unsigned long flags;
- enum idxd_complete_type reason;
- *processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
- goto out;
-
- if (wtype == IRQ_WORK_NORMAL)
- reason = IDXD_COMPLETE_NORMAL;
- else
- reason = IDXD_COMPLETE_DEV_FAIL;
+ return;
llist_for_each_entry_safe(desc, t, head, llnode) {
u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
if (status) {
- if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ /*
+ * Check against the original status as ABORT is software defined
+ * and set to 0xff, which DSA_COMP_STATUS_MASK would otherwise mask out.
+ */
+ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
complete_desc(desc, IDXD_COMPLETE_ABORT);
- (*processed)++;
continue;
}
- if (unlikely(status != DSA_COMP_SUCCESS))
- match_fault(desc, data);
- complete_desc(desc, reason);
- (*processed)++;
+ complete_desc(desc, IDXD_COMPLETE_NORMAL);
} else {
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ spin_lock(&irq_entry->list_lock);
list_add_tail(&desc->list,
&irq_entry->work_list);
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- queued++;
+ spin_unlock(&irq_entry->list_lock);
}
}
-
- out:
- return queued;
}
-static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data)
+static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
{
- int queued = 0;
- unsigned long flags;
LIST_HEAD(flist);
struct idxd_desc *desc, *n;
- enum idxd_complete_type reason;
-
- *processed = 0;
- if (wtype == IRQ_WORK_NORMAL)
- reason = IDXD_COMPLETE_NORMAL;
- else
- reason = IDXD_COMPLETE_DEV_FAIL;
/*
* This lock protects the list from corruption by accesses outside of the irq
* handler thread.
*/
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ spin_lock(&irq_entry->list_lock);
if (list_empty(&irq_entry->work_list)) {
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- return 0;
+ spin_unlock(&irq_entry->list_lock);
+ return;
}
list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
if (desc->completion->status) {
list_del(&desc->list);
- (*processed)++;
list_add_tail(&desc->list, &flist);
- } else {
- queued++;
}
}
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+ spin_unlock(&irq_entry->list_lock);
list_for_each_entry(desc, &flist, list) {
- u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
-
- if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ /*
+ * Check against the original status as ABORT is software defined
+ * and set to 0xff, which DSA_COMP_STATUS_MASK would otherwise mask out.
+ */
+ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
complete_desc(desc, IDXD_COMPLETE_ABORT);
continue;
}
- if (unlikely(status != DSA_COMP_SUCCESS))
- match_fault(desc, data);
- complete_desc(desc, reason);
+ complete_desc(desc, IDXD_COMPLETE_NORMAL);
}
-
- return queued;
}
-static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
+irqreturn_t idxd_wq_thread(int irq, void *data)
{
- int rc, processed, total = 0;
+ struct idxd_irq_entry *irq_entry = data;
/*
* There are two lists we are processing. The pending_llist is where
@@ -367,31 +263,9 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
* and process the completed entries.
* 4. If the entry is still waiting on hardware, list_add_tail() to
* the work_list.
- * 5. Repeat until no more descriptors.
*/
- do {
- rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
- &processed, 0);
- total += processed;
- if (rc != 0)
- continue;
-
- rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
- &processed, 0);
- total += processed;
- } while (rc != 0);
-
- return total;
-}
-
-irqreturn_t idxd_wq_thread(int irq, void *data)
-{
- struct idxd_irq_entry *irq_entry = data;
- int processed;
-
- processed = idxd_desc_process(irq_entry);
- if (processed == 0)
- return IRQ_NONE;
+ irq_process_work_list(irq_entry);
+ irq_process_pending_llist(irq_entry);
return IRQ_HANDLED;
}
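[Editorial note: after the rework above, idxd_wq_thread() makes one pass over each list: completed descriptors are finished immediately, and entries still waiting on hardware migrate from the lock-free pending list to the locked work list. The single-threaded model below illustrates that flow; plain linked lists stand in for llist_head and the spinlocked work_list, so no concurrency is modeled.]

#include <stdio.h>

#define NDESC 4

struct desc {
	int id;
	int status;          /* 0 = still running, non-zero = completed */
	struct desc *next;
};

/* Complete finished entries, park the rest on the work list. */
static struct desc *process_pending(struct desc *pending, struct desc **work)
{
	struct desc *d, *n;

	for (d = pending; d; d = n) {
		n = d->next;
		if (d->status) {
			printf("complete desc %d\n", d->id);
		} else {
			d->next = *work;  /* still waiting on hardware */
			*work = d;
		}
	}
	return NULL;
}

int main(void)
{
	struct desc descs[NDESC], *pending = NULL, *work = NULL;

	for (int i = 0; i < NDESC; i++) {
		descs[i] = (struct desc){ .id = i, .status = i & 1, .next = pending };
		pending = &descs[i];
	}
	pending = process_pending(pending, &work);
	for (struct desc *d = work; d; d = d->next)
		printf("desc %d parked on work list\n", d->id);
	return 0;
}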
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index c970c3f025f0..ffc7550a77ee 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -7,6 +7,9 @@
#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
+#define DEVICE_VERSION_1 0x100
+#define DEVICE_VERSION_2 0x200
+
#define IDXD_MMIO_BAR 0
#define IDXD_WQ_BAR 2
#define IDXD_PORTAL_SIZE PAGE_SIZE
@@ -349,6 +352,9 @@ union wqcfg {
} __packed;
#define WQCFG_PASID_IDX 2
+#define WQCFG_OCCUP_IDX 6
+
+#define WQCFG_OCCUP_MASK 0xffff
/*
* This macro calculates the offset into the WQCFG register
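[Editorial note: the new WQCFG_OCCUP_IDX/WQCFG_OCCUP_MASK pair is consumed by wq_occupancy_show() later in this series. The sketch below shows the register arithmetic, assuming the offset macro adds a per-WQ config stride plus a 32-bit word index; the base and stride values are illustrative, not taken from this patch.]

#include <stdio.h>
#include <stdint.h>

#define WQCFG_OCCUP_IDX  6
#define WQCFG_OCCUP_MASK 0xffff

/* Assumed shape of the calculation: base + wq * stride + word * 4. */
static uint32_t wqcfg_offset(uint32_t base, uint32_t stride, int wq_id, int word)
{
	return base + wq_id * stride + word * sizeof(uint32_t);
}

int main(void)
{
	uint32_t reg = 0xabcd1234;               /* pretend ioread32() result */
	uint32_t occup = reg & WQCFG_OCCUP_MASK; /* low 16 bits hold occupancy */

	printf("wq2 occupancy word at %#x, occupancy = %u\n",
	       wqcfg_offset(0x1000, 0x20, 2, WQCFG_OCCUP_IDX), occup);
	return 0;
}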
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 36c9c1a89b7e..de76fb4abac2 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -22,21 +22,13 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
desc->hw->pasid = idxd->pasid;
/*
- * Descriptor completion vectors are 1...N for MSIX. We will round
- * robin through the N vectors.
+ * On host, MSIX vector 0 is used for misc interrupts. Therefore, when we match
+ * vectors 1:1 to the WQ id, we need to add 1.
*/
- wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
- if (!idxd->int_handles) {
- desc->hw->int_handle = wq->vec_ptr;
- } else {
- /*
- * int_handles are only for descriptor completion. However for device
- * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
- * though we are rotating through 1...N for descriptor interrupts, we
- * need to acqurie the int_handles from 0..N-1.
- */
- desc->hw->int_handle = idxd->int_handles[desc->vector - 1];
- }
+ if (!idxd->int_handles)
+ desc->hw->int_handle = wq->id + 1;
+ else
+ desc->hw->int_handle = idxd->int_handles[wq->id];
return desc;
}
@@ -67,7 +59,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
if (signal_pending_state(TASK_INTERRUPTIBLE, current))
break;
idx = sbitmap_queue_get(sbq, &cpu);
- if (idx > 0)
+ if (idx >= 0)
break;
schedule();
}
@@ -114,14 +106,13 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{
struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head;
- unsigned long flags;
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
* Grab the list lock so it will block the irq thread handler. This allows the
* abort code to locate the descriptor that needs to be aborted.
*/
- spin_lock_irqsave(&ie->list_lock, flags);
+ spin_lock(&ie->list_lock);
head = llist_del_all(&ie->pending_llist);
if (head) {
llist_for_each_entry_safe(d, t, head, llnode) {
@@ -135,7 +126,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (!found)
found = list_abort_desc(wq, ie, desc);
- spin_unlock_irqrestore(&ie->list_lock, flags);
+ spin_unlock(&ie->list_lock);
if (found)
complete_desc(found, IDXD_COMPLETE_ABORT);
@@ -148,13 +139,17 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
void __iomem *portal;
int rc;
- if (idxd->state != IDXD_DEV_ENABLED)
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ idxd_free_desc(wq, desc);
return -EIO;
+ }
- if (!percpu_ref_tryget_live(&wq->wq_active))
+ if (!percpu_ref_tryget_live(&wq->wq_active)) {
+ idxd_free_desc(wq, desc);
return -ENXIO;
+ }
- portal = wq->portal;
+ portal = idxd_wq_portal_addr(wq);
/*
* The wmb() flushes writes to coherent DMA data before
@@ -168,7 +163,7 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* that we designated the descriptor to.
*/
if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
- ie = &idxd->irq_entries[desc->vector];
+ ie = &idxd->irq_entries[wq->id + 1];
llist_add(&desc->llnode, &ie->pending_llist);
}
@@ -183,8 +178,12 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
*/
rc = enqcmds(portal, desc->hw);
if (rc < 0) {
+ percpu_ref_put(&wq->wq_active);
+ /* abort operation frees the descriptor */
if (ie)
llist_abort_desc(wq, ie, desc);
+ else
+ idxd_free_desc(wq, desc);
return rc;
}
}
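[Editorial note: the `idx >= 0` change above fixes an off-by-one: sbitmap_queue_get() returns a non-negative index on success (index 0 is valid) and a negative value on failure, so the old `idx > 0` check silently discarded slot 0. The toy allocator below shows why the comparison matters; slot_get() is a hypothetical stand-in.]

#include <stdio.h>

#define NSLOTS 4
static int used[NSLOTS];

/* Toy stand-in for sbitmap_queue_get(): >= 0 on success, -1 when full. */
static int slot_get(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (!used[i]) {
			used[i] = 1;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	int idx = slot_get();            /* first call returns slot 0 */

	if (idx > 0)                     /* BUG: rejects the valid slot 0 */
		printf("buggy check accepted slot %d\n", idx);
	if (idx >= 0)                    /* correct: only negative is failure */
		printf("fixed check accepted slot %d\n", idx);
	return 0;
}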
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 26d8ff97d13d..a9025be940db 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -16,336 +16,11 @@ static char *idxd_wq_type_names[] = {
[IDXD_WQT_USER] = "user",
};
-static int idxd_config_bus_match(struct device *dev,
- struct device_driver *drv)
-{
- int matched = 0;
-
- if (is_idxd_dev(dev)) {
- struct idxd_device *idxd = confdev_to_idxd(dev);
-
- if (idxd->state != IDXD_DEV_CONF_READY)
- return 0;
- matched = 1;
- } else if (is_idxd_wq_dev(dev)) {
- struct idxd_wq *wq = confdev_to_wq(dev);
- struct idxd_device *idxd = wq->idxd;
-
- if (idxd->state < IDXD_DEV_CONF_READY)
- return 0;
-
- if (wq->state != IDXD_WQ_DISABLED) {
- dev_dbg(dev, "%s not disabled\n", dev_name(dev));
- return 0;
- }
- matched = 1;
- }
-
- if (matched)
- dev_dbg(dev, "%s matched\n", dev_name(dev));
-
- return matched;
-}
-
-static int enable_wq(struct idxd_wq *wq)
-{
- struct idxd_device *idxd = wq->idxd;
- struct device *dev = &idxd->pdev->dev;
- unsigned long flags;
- int rc;
-
- mutex_lock(&wq->wq_lock);
-
- if (idxd->state != IDXD_DEV_ENABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Enabling while device not enabled.\n");
- return -EPERM;
- }
-
- if (wq->state != IDXD_WQ_DISABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d already enabled.\n", wq->id);
- return -EBUSY;
- }
-
- if (!wq->group) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ not attached to group.\n");
- return -EINVAL;
- }
-
- if (strlen(wq->name) == 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ name not set.\n");
- return -EINVAL;
- }
-
- /* Shared WQ checks */
- if (wq_shared(wq)) {
- if (!device_swq_supported(idxd)) {
- dev_warn(dev, "PASID not enabled and shared WQ.\n");
- mutex_unlock(&wq->wq_lock);
- return -ENXIO;
- }
- /*
- * Shared wq with the threshold set to 0 means the user
- * did not set the threshold or transitioned from a
- * dedicated wq but did not set threshold. A value
- * of 0 would effectively disable the shared wq. The
- * driver does not allow a value of 0 to be set for
- * threshold via sysfs.
- */
- if (wq->threshold == 0) {
- dev_warn(dev, "Shared WQ and threshold 0.\n");
- mutex_unlock(&wq->wq_lock);
- return -EINVAL;
- }
- }
-
- rc = idxd_wq_alloc_resources(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ resource alloc failed\n");
- return rc;
- }
-
- spin_lock_irqsave(&idxd->dev_lock, flags);
- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
- rc = idxd_device_config(idxd);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_enable(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_map_portal(wq);
- if (rc < 0) {
- dev_warn(dev, "wq portal mapping failed: %d\n", rc);
- rc = idxd_wq_disable(wq);
- if (rc < 0)
- dev_warn(dev, "IDXD wq disable failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
-
- wq->client_count = 0;
-
- if (wq->type == IDXD_WQT_KERNEL) {
- rc = idxd_wq_init_percpu_ref(wq);
- if (rc < 0) {
- dev_dbg(dev, "percpu_ref setup failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- }
-
- if (is_idxd_wq_dmaengine(wq)) {
- rc = idxd_register_dma_channel(wq);
- if (rc < 0) {
- dev_dbg(dev, "DMA channel register failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- } else if (is_idxd_wq_cdev(wq)) {
- rc = idxd_wq_add_cdev(wq);
- if (rc < 0) {
- dev_dbg(dev, "Cdev creation failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- }
-
- mutex_unlock(&wq->wq_lock);
- dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
-
- return 0;
-}
-
-static int idxd_config_bus_probe(struct device *dev)
-{
- int rc = 0;
- unsigned long flags;
-
- dev_dbg(dev, "%s called\n", __func__);
-
- if (is_idxd_dev(dev)) {
- struct idxd_device *idxd = confdev_to_idxd(dev);
-
- if (idxd->state != IDXD_DEV_CONF_READY) {
- dev_warn(dev, "Device not ready for config\n");
- return -EBUSY;
- }
-
- if (!try_module_get(THIS_MODULE))
- return -ENXIO;
-
- /* Perform IDXD configuration and enabling */
- spin_lock_irqsave(&idxd->dev_lock, flags);
- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
- rc = idxd_device_config(idxd);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
- if (rc < 0) {
- module_put(THIS_MODULE);
- dev_warn(dev, "Device config failed: %d\n", rc);
- return rc;
- }
-
- /* start device */
- rc = idxd_device_enable(idxd);
- if (rc < 0) {
- module_put(THIS_MODULE);
- dev_warn(dev, "Device enable failed: %d\n", rc);
- return rc;
- }
-
- dev_info(dev, "Device %s enabled\n", dev_name(dev));
-
- rc = idxd_register_dma_device(idxd);
- if (rc < 0) {
- module_put(THIS_MODULE);
- dev_dbg(dev, "Failed to register dmaengine device\n");
- return rc;
- }
- return 0;
- } else if (is_idxd_wq_dev(dev)) {
- struct idxd_wq *wq = confdev_to_wq(dev);
-
- return enable_wq(wq);
- }
-
- return -ENODEV;
-}
-
-static void disable_wq(struct idxd_wq *wq)
-{
- struct idxd_device *idxd = wq->idxd;
- struct device *dev = &idxd->pdev->dev;
-
- mutex_lock(&wq->wq_lock);
- dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
- if (wq->state == IDXD_WQ_DISABLED) {
- mutex_unlock(&wq->wq_lock);
- return;
- }
-
- if (wq->type == IDXD_WQT_KERNEL)
- idxd_wq_quiesce(wq);
-
- if (is_idxd_wq_dmaengine(wq))
- idxd_unregister_dma_channel(wq);
- else if (is_idxd_wq_cdev(wq))
- idxd_wq_del_cdev(wq);
-
- if (idxd_wq_refcount(wq))
- dev_warn(dev, "Clients has claim on wq %d: %d\n",
- wq->id, idxd_wq_refcount(wq));
-
- idxd_wq_unmap_portal(wq);
-
- idxd_wq_drain(wq);
- idxd_wq_reset(wq);
-
- idxd_wq_free_resources(wq);
- wq->client_count = 0;
- mutex_unlock(&wq->wq_lock);
-
- dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
-}
-
-static void idxd_config_bus_remove(struct device *dev)
-{
- int rc;
-
- dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
-
- /* disable workqueue here */
- if (is_idxd_wq_dev(dev)) {
- struct idxd_wq *wq = confdev_to_wq(dev);
-
- disable_wq(wq);
- } else if (is_idxd_dev(dev)) {
- struct idxd_device *idxd = confdev_to_idxd(dev);
- int i;
-
- dev_dbg(dev, "%s removing dev %s\n", __func__,
- dev_name(&idxd->conf_dev));
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- if (wq->state == IDXD_WQ_DISABLED)
- continue;
- dev_warn(dev, "Active wq %d on disable %s.\n", i,
- dev_name(&idxd->conf_dev));
- device_release_driver(&wq->conf_dev);
- }
-
- idxd_unregister_dma_device(idxd);
- rc = idxd_device_disable(idxd);
- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- mutex_lock(&wq->wq_lock);
- idxd_wq_disable_cleanup(wq);
- mutex_unlock(&wq->wq_lock);
- }
- }
- module_put(THIS_MODULE);
- if (rc < 0)
- dev_warn(dev, "Device disable failed\n");
- else
- dev_info(dev, "Device %s disabled\n", dev_name(dev));
-
- }
-}
-
-static void idxd_config_bus_shutdown(struct device *dev)
-{
- dev_dbg(dev, "%s called\n", __func__);
-}
-
-struct bus_type dsa_bus_type = {
- .name = "dsa",
- .match = idxd_config_bus_match,
- .probe = idxd_config_bus_probe,
- .remove = idxd_config_bus_remove,
- .shutdown = idxd_config_bus_shutdown,
-};
-
-static struct idxd_device_driver dsa_drv = {
- .drv = {
- .name = "dsa",
- .bus = &dsa_bus_type,
- .owner = THIS_MODULE,
- .mod_name = KBUILD_MODNAME,
- },
-};
-
-/* IDXD generic driver setup */
-int idxd_register_driver(void)
-{
- return driver_register(&dsa_drv.drv);
-}
-
-void idxd_unregister_driver(void)
-{
- driver_unregister(&dsa_drv.drv);
-}
-
/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_engine *engine =
- container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_engine *engine = confdev_to_engine(dev);
if (engine->group)
return sysfs_emit(buf, "%d\n", engine->group->id);
@@ -357,8 +32,7 @@ static ssize_t engine_group_id_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_engine *engine =
- container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_engine *engine = confdev_to_engine(dev);
struct idxd_device *idxd = engine->idxd;
long id;
int rc;
@@ -412,7 +86,7 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
static void idxd_conf_engine_release(struct device *dev)
{
- struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_engine *engine = confdev_to_engine(dev);
kfree(engine);
}
@@ -442,8 +116,7 @@ static ssize_t group_tokens_reserved_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}
@@ -452,8 +125,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
unsigned long val;
int rc;
@@ -490,8 +162,7 @@ static ssize_t group_tokens_allowed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}
@@ -500,8 +171,7 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
unsigned long val;
int rc;
@@ -535,8 +205,7 @@ static ssize_t group_use_token_limit_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%u\n", group->use_token_limit);
}
@@ -545,8 +214,7 @@ static ssize_t group_use_token_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
unsigned long val;
int rc;
@@ -578,8 +246,7 @@ static struct device_attribute dev_attr_group_use_token_limit =
static ssize_t group_engines_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
int i, rc = 0;
struct idxd_device *idxd = group->idxd;
@@ -607,8 +274,7 @@ static struct device_attribute dev_attr_group_engines =
static ssize_t group_work_queues_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
int i, rc = 0;
struct idxd_device *idxd = group->idxd;
@@ -637,8 +303,7 @@ static ssize_t group_traffic_class_a_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%d\n", group->tc_a);
}
@@ -647,8 +312,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
long val;
int rc;
@@ -663,6 +327,9 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ return -EPERM;
+
if (val < 0 || val > 7)
return -EINVAL;
@@ -678,8 +345,7 @@ static ssize_t group_traffic_class_b_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%d\n", group->tc_b);
}
@@ -688,8 +354,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
long val;
int rc;
@@ -704,6 +369,9 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ return -EPERM;
+
if (val < 0 || val > 7)
return -EINVAL;
@@ -737,7 +405,7 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
static void idxd_conf_group_release(struct device *dev)
{
- struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
kfree(group);
}
@@ -752,7 +420,7 @@ struct device_type idxd_group_device_type = {
static ssize_t wq_clients_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%d\n", wq->client_count);
}
@@ -763,7 +431,7 @@ static struct device_attribute dev_attr_wq_clients =
static ssize_t wq_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
switch (wq->state) {
case IDXD_WQ_DISABLED:
@@ -781,7 +449,7 @@ static struct device_attribute dev_attr_wq_state =
static ssize_t wq_group_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
if (wq->group)
return sysfs_emit(buf, "%u\n", wq->group->id);
@@ -793,7 +461,7 @@ static ssize_t wq_group_id_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
long id;
int rc;
@@ -836,7 +504,7 @@ static struct device_attribute dev_attr_wq_group_id =
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}
@@ -845,7 +513,7 @@ static ssize_t wq_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -872,7 +540,7 @@ static struct device_attribute dev_attr_wq_mode =
static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->size);
}
@@ -895,7 +563,7 @@ static ssize_t wq_size_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
unsigned long size;
struct idxd_device *idxd = wq->idxd;
int rc;
@@ -923,7 +591,7 @@ static struct device_attribute dev_attr_wq_size =
static ssize_t wq_priority_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->priority);
}
@@ -932,7 +600,7 @@ static ssize_t wq_priority_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
unsigned long prio;
struct idxd_device *idxd = wq->idxd;
int rc;
@@ -960,7 +628,7 @@ static struct device_attribute dev_attr_wq_priority =
static ssize_t wq_block_on_fault_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}
@@ -969,11 +637,14 @@ static ssize_t wq_block_on_fault_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
bool bof;
int rc;
+ if (!idxd->hw.gen_cap.block_on_fault)
+ return -EOPNOTSUPP;
+
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
@@ -999,7 +670,7 @@ static struct device_attribute dev_attr_wq_block_on_fault =
static ssize_t wq_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->threshold);
}
@@ -1008,7 +679,7 @@ static ssize_t wq_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
unsigned int val;
int rc;
@@ -1040,7 +711,7 @@ static struct device_attribute dev_attr_wq_threshold =
static ssize_t wq_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
switch (wq->type) {
case IDXD_WQT_KERNEL:
@@ -1059,7 +730,7 @@ static ssize_t wq_type_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
enum idxd_wq_type old_type;
if (wq->state != IDXD_WQ_DISABLED)
@@ -1088,7 +759,7 @@ static struct device_attribute dev_attr_wq_type =
static ssize_t wq_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%s\n", wq->name);
}
@@ -1097,7 +768,7 @@ static ssize_t wq_name_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@@ -1124,7 +795,7 @@ static struct device_attribute dev_attr_wq_name =
static ssize_t wq_cdev_minor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
int minor = -1;
mutex_lock(&wq->wq_lock);
@@ -1158,7 +829,7 @@ static int __get_sysfs_u64(const char *buf, u64 *val)
static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}
@@ -1166,7 +837,7 @@ static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attri
static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
u64 xfer_size;
int rc;
@@ -1192,7 +863,7 @@ static struct device_attribute dev_attr_wq_max_transfer_size =
static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}
@@ -1200,7 +871,7 @@ static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribut
static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
u64 batch_size;
int rc;
@@ -1225,7 +896,7 @@ static struct device_attribute dev_attr_wq_max_batch_size =
static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->ats_dis);
}
@@ -1233,7 +904,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
bool ats_dis;
int rc;
@@ -1256,6 +927,24 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
static struct device_attribute dev_attr_wq_ats_disable =
__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
+static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+ u32 occup, offset;
+
+ if (!idxd->hw.wq_cap.occupancy)
+ return -EOPNOTSUPP;
+
+ offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
+ occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;
+
+ return sysfs_emit(buf, "%u\n", occup);
+}
+
+static struct device_attribute dev_attr_wq_occupancy =
+ __ATTR(occupancy, 0444, wq_occupancy_show, NULL);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1271,6 +960,7 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_max_transfer_size.attr,
&dev_attr_wq_max_batch_size.attr,
&dev_attr_wq_ats_disable.attr,
+ &dev_attr_wq_occupancy.attr,
NULL,
};
@@ -1285,7 +975,7 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
static void idxd_conf_wq_release(struct device *dev)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
kfree(wq->wqcfg);
kfree(wq);
@@ -1301,8 +991,7 @@ struct device_type idxd_wq_device_type = {
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
@@ -1312,8 +1001,7 @@ static ssize_t max_work_queues_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
@@ -1322,8 +1010,7 @@ static DEVICE_ATTR_RO(max_work_queues_size);
static ssize_t max_groups_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
@@ -1332,8 +1019,7 @@ static DEVICE_ATTR_RO(max_groups);
static ssize_t max_work_queues_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
@@ -1342,8 +1028,7 @@ static DEVICE_ATTR_RO(max_work_queues);
static ssize_t max_engines_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
@@ -1352,8 +1037,7 @@ static DEVICE_ATTR_RO(max_engines);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
@@ -1362,8 +1046,7 @@ static DEVICE_ATTR_RO(numa_node);
static ssize_t max_batch_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
@@ -1373,8 +1056,7 @@ static ssize_t max_transfer_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
@@ -1383,8 +1065,7 @@ static DEVICE_ATTR_RO(max_transfer_size);
static ssize_t op_cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
int i, rc = 0;
for (i = 0; i < 4; i++)
@@ -1399,8 +1080,7 @@ static DEVICE_ATTR_RO(op_cap);
static ssize_t gen_cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
@@ -1409,8 +1089,7 @@ static DEVICE_ATTR_RO(gen_cap);
static ssize_t configurable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
@@ -1419,18 +1098,16 @@ static DEVICE_ATTR_RO(configurable);
static ssize_t clients_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
- unsigned long flags;
+ struct idxd_device *idxd = confdev_to_idxd(dev);
int count = 0, i;
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i];
count += wq->client_count;
}
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
return sysfs_emit(buf, "%d\n", count);
}
@@ -1439,8 +1116,7 @@ static DEVICE_ATTR_RO(clients);
static ssize_t pasid_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
@@ -1449,12 +1125,10 @@ static DEVICE_ATTR_RO(pasid_enabled);
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
switch (idxd->state) {
case IDXD_DEV_DISABLED:
- case IDXD_DEV_CONF_READY:
return sysfs_emit(buf, "disabled\n");
case IDXD_DEV_ENABLED:
return sysfs_emit(buf, "enabled\n");
@@ -1469,15 +1143,13 @@ static DEVICE_ATTR_RO(state);
static ssize_t errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
int i, out = 0;
- unsigned long flags;
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
for (i = 0; i < 4; i++)
out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
out--;
out += sysfs_emit_at(buf, out, "\n");
return out;
@@ -1487,8 +1159,7 @@ static DEVICE_ATTR_RO(errors);
static ssize_t max_tokens_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
@@ -1497,8 +1168,7 @@ static DEVICE_ATTR_RO(max_tokens);
static ssize_t token_limit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->token_limit);
}
@@ -1507,8 +1177,7 @@ static ssize_t token_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
unsigned long val;
int rc;
@@ -1536,8 +1205,7 @@ static DEVICE_ATTR_RW(token_limit);
static ssize_t cdev_major_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->major);
}
@@ -1546,11 +1214,20 @@ static DEVICE_ATTR_RO(cdev_major);
static ssize_t cmd_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
-static DEVICE_ATTR_RO(cmd_status);
+
+static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ idxd->cmd_status = 0;
+ return count;
+}
+static DEVICE_ATTR_RW(cmd_status);
static struct attribute *idxd_device_attributes[] = {
&dev_attr_version.attr,
@@ -1586,7 +1263,7 @@ static const struct attribute_group *idxd_attribute_groups[] = {
static void idxd_conf_device_release(struct device *dev)
{
- struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
kfree(idxd->groups);
kfree(idxd->wqs);
@@ -1611,12 +1288,12 @@ struct device_type iax_device_type = {
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
+ struct idxd_engine *engine;
int i, j, rc;
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = idxd->engines[i];
-
- rc = device_add(&engine->conf_dev);
+ engine = idxd->engines[i];
+ rc = device_add(engine_confdev(engine));
if (rc < 0)
goto cleanup;
}
@@ -1625,22 +1302,26 @@ static int idxd_register_engine_devices(struct idxd_device *idxd)
cleanup:
j = i - 1;
- for (; i < idxd->max_engines; i++)
- put_device(&idxd->engines[i]->conf_dev);
+ for (; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ put_device(engine_confdev(engine));
+ }
- while (j--)
- device_unregister(&idxd->engines[j]->conf_dev);
+ while (j--) {
+ engine = idxd->engines[j];
+ device_unregister(engine_confdev(engine));
+ }
return rc;
}
static int idxd_register_group_devices(struct idxd_device *idxd)
{
+ struct idxd_group *group;
int i, j, rc;
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = idxd->groups[i];
-
- rc = device_add(&group->conf_dev);
+ group = idxd->groups[i];
+ rc = device_add(group_confdev(group));
if (rc < 0)
goto cleanup;
}
@@ -1649,22 +1330,26 @@ static int idxd_register_group_devices(struct idxd_device *idxd)
cleanup:
j = i - 1;
- for (; i < idxd->max_groups; i++)
- put_device(&idxd->groups[i]->conf_dev);
+ for (; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ }
- while (j--)
- device_unregister(&idxd->groups[j]->conf_dev);
+ while (j--) {
+ group = idxd->groups[j];
+ device_unregister(group_confdev(group));
+ }
return rc;
}
static int idxd_register_wq_devices(struct idxd_device *idxd)
{
+ struct idxd_wq *wq;
int i, rc, j;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- rc = device_add(&wq->conf_dev);
+ wq = idxd->wqs[i];
+ rc = device_add(wq_confdev(wq));
if (rc < 0)
goto cleanup;
}
@@ -1673,11 +1358,15 @@ static int idxd_register_wq_devices(struct idxd_device *idxd)
cleanup:
j = i - 1;
- for (; i < idxd->max_wqs; i++)
- put_device(&idxd->wqs[i]->conf_dev);
+ for (; i < idxd->max_wqs; i++) {
+ wq = idxd->wqs[i];
+ put_device(wq_confdev(wq));
+ }
- while (j--)
- device_unregister(&idxd->wqs[j]->conf_dev);
+ while (j--) {
+ wq = idxd->wqs[j];
+ device_unregister(wq_confdev(wq));
+ }
return rc;
}
@@ -1686,7 +1375,7 @@ int idxd_register_devices(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
int rc, i;
- rc = device_add(&idxd->conf_dev);
+ rc = device_add(idxd_confdev(idxd));
if (rc < 0)
return rc;
@@ -1712,12 +1401,12 @@ int idxd_register_devices(struct idxd_device *idxd)
err_group:
for (i = 0; i < idxd->max_engines; i++)
- device_unregister(&idxd->engines[i]->conf_dev);
+ device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
- device_unregister(&idxd->wqs[i]->conf_dev);
+ device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
- device_del(&idxd->conf_dev);
+ device_del(idxd_confdev(idxd));
return rc;
}
@@ -1728,19 +1417,19 @@ void idxd_unregister_devices(struct idxd_device *idxd)
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i];
- device_unregister(&wq->conf_dev);
+ device_unregister(wq_confdev(wq));
}
for (i = 0; i < idxd->max_engines; i++) {
struct idxd_engine *engine = idxd->engines[i];
- device_unregister(&engine->conf_dev);
+ device_unregister(engine_confdev(engine));
}
for (i = 0; i < idxd->max_groups; i++) {
struct idxd_group *group = idxd->groups[i];
- device_unregister(&group->conf_dev);
+ device_unregister(group_confdev(group));
}
}
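[Editorial note: all of the open-coded container_of() casts in sysfs.c collapse into the confdev_to_*() helpers. Below is a self-contained userspace sketch of the pattern; struct idxd_wq_like and its fields are hypothetical and only mirror the embedded-device layout the real helper relies on.]

#include <stdio.h>
#include <stddef.h>

/* Userspace container_of, same shape as the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { const char *name; };

struct idxd_wq_like {               /* hypothetical, mirrors struct idxd_wq */
	int id;
	struct device conf_dev;     /* embedded device the helper recovers from */
};

static struct idxd_wq_like *confdev_to_wq(struct device *dev)
{
	return container_of(dev, struct idxd_wq_like, conf_dev);
}

int main(void)
{
	struct idxd_wq_like wq = { .id = 3, .conf_dev = { .name = "wq0.3" } };
	struct device *dev = &wq.conf_dev;

	printf("recovered wq id %d from %s\n", confdev_to_wq(dev)->id, dev->name);
	return 0;
}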
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index df7704053d91..e2b5129c5f84 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4319,6 +4319,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long val;
+ int err;
if (!count || count > 11)
return -EINVAL;
@@ -4327,7 +4328,10 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
return -EFAULT;
/* Write a key */
- sscanf(buf, "%lx", &val);
+ err = kstrtoul(buf, 16, &val);
+ if (err)
+ return err;
+
dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
isync();
@@ -4368,7 +4372,7 @@ static ssize_t poly_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long reg, val;
-
+ int err;
#ifdef CONFIG_440SP
/* 440SP uses default 0x14D polynomial only */
return -EINVAL;
@@ -4378,7 +4382,9 @@ static ssize_t poly_store(struct device_driver *dev, const char *buf,
return -EINVAL;
/* e.g., 0x14D or 0x11D */
- sscanf(buf, "%lx", &val);
+ err = kstrtoul(buf, 16, &val);
+ if (err)
+ return err;
if (val & ~0x1FF)
return -EINVAL;
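
The two hunks above replace `sscanf(buf, "%lx", &val)` with kstrtoul(), which rejects trailing garbage and reports overflow instead of silently leaving `val` unset on a parse failure. A minimal sketch of the same pattern for any driver-attribute store, with hypothetical names:

static ssize_t example_store(struct device_driver *drv, const char *buf,
			     size_t count)
{
	unsigned long val;
	int err;

	/* base 16, matching the "%lx" it replaces; base 0 would auto-detect */
	err = kstrtoul(buf, 16, &val);
	if (err)
		return err;	/* -EINVAL or -ERANGE from the parser */

	/* ... apply val to the hardware ... */
	return count;
}
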
diff --git a/drivers/dma/ptdma/Kconfig b/drivers/dma/ptdma/Kconfig
new file mode 100644
index 000000000000..b430edd709f9
--- /dev/null
+++ b/drivers/dma/ptdma/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config AMD_PTDMA
+ tristate "AMD PassThru DMA Engine"
+ depends on X86_64 && PCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD PTDMA controller. This controller
+ provides DMA capabilities to perform high bandwidth memory to
+ memory and IO copy operations. It performs DMA transfer through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general purpose peripheral DMA.
diff --git a/drivers/dma/ptdma/Makefile b/drivers/dma/ptdma/Makefile
new file mode 100644
index 000000000000..ce5410268a9a
--- /dev/null
+++ b/drivers/dma/ptdma/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD Passthru DMA driver
+#
+
+obj-$(CONFIG_AMD_PTDMA) += ptdma.o
+
+ptdma-objs := ptdma-dev.o ptdma-dmaengine.o ptdma-debugfs.o
+
+ptdma-$(CONFIG_PCI) += ptdma-pci.o
diff --git a/drivers/dma/ptdma/ptdma-debugfs.c b/drivers/dma/ptdma/ptdma-debugfs.c
new file mode 100644
index 000000000000..c8307d3044a3
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-debugfs.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "ptdma.h"
+
+/* DebugFS helpers */
+#define RI_VERSION_NUM 0x0000003F
+
+#define RI_NUM_VQM 0x00078000
+#define RI_NVQM_SHIFT 15
+
+static int pt_debugfs_info_show(struct seq_file *s, void *p)
+{
+ struct pt_device *pt = s->private;
+ unsigned int regval;
+
+ seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
+ seq_printf(s, " # Queues: %d\n", 1);
+ seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+
+ regval = ioread32(pt->io_regs + CMD_PT_VERSION);
+
+ seq_printf(s, " Version: %d\n", regval & RI_VERSION_NUM);
+	seq_puts(s, " Engines:\n");
+ seq_printf(s, " Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);
+
+ return 0;
+}
+
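
pt_debugfs_info_show() pulls two fields out of the version register with an explicit mask (RI_VERSION_NUM, bits 5:0) and a mask-plus-shift (RI_NUM_VQM / RI_NVQM_SHIFT, bits 18:15). Since both masks are contiguous, the same extraction could lean on the bitfield helpers this driver already uses in ptdma-dev.c; a sketch only, with hypothetical local variables, not part of the patch:

	#include <linux/bitfield.h>

	version  = FIELD_GET(RI_VERSION_NUM, regval);	/* bits 5:0 */
	n_queues = FIELD_GET(RI_NUM_VQM, regval);	/* bits 18:15, shift implied */
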
+/*
+ * Return a formatted buffer containing the current
+ * queue statistics for the PTDMA device
+ */
+static int pt_debugfs_stats_show(struct seq_file *s, void *p)
+{
+	struct pt_device *pt = s->private;
+
+	seq_printf(s, "Total Interrupts Handled: %lu\n", pt->total_interrupts);
+
+ return 0;
+}
+
+static int pt_debugfs_queue_show(struct seq_file *s, void *p)
+{
+ struct pt_cmd_queue *cmd_q = s->private;
+ unsigned int regval;
+
+ if (!cmd_q)
+ return 0;
+
+	seq_printf(s, " Pass-Thru: %lu\n", cmd_q->total_pt_ops);
+
+ regval = ioread32(cmd_q->reg_control + 0x000C);
+
+ seq_puts(s, " Enabled Interrupts:");
+ if (regval & INT_EMPTY_QUEUE)
+ seq_puts(s, " EMPTY");
+ if (regval & INT_QUEUE_STOPPED)
+ seq_puts(s, " STOPPED");
+ if (regval & INT_ERROR)
+ seq_puts(s, " ERROR");
+ if (regval & INT_COMPLETION)
+ seq_puts(s, " COMPLETION");
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_info);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_queue);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
+
+void ptdma_debugfs_setup(struct pt_device *pt)
+{
+ struct pt_cmd_queue *cmd_q;
+ struct dentry *debugfs_q_instance;
+
+ if (!debugfs_initialized())
+ return;
+
+ debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_info_fops);
+
+ debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_stats_fops);
+
+ cmd_q = &pt->cmd_q;
+
+ debugfs_q_instance =
+ debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
+
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+}
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
new file mode 100644
index 000000000000..8a6bf291a73f
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptdma.h"
+
+/* Human-readable error strings */
+static char *pt_error_codes[] = {
+ "",
+ "ERR 01: ILLEGAL_ENGINE",
+ "ERR 03: ILLEGAL_FUNCTION_TYPE",
+ "ERR 04: ILLEGAL_FUNCTION_MODE",
+ "ERR 06: ILLEGAL_FUNCTION_SIZE",
+ "ERR 08: ILLEGAL_FUNCTION_RSVD",
+ "ERR 09: ILLEGAL_BUFFER_LENGTH",
+ "ERR 10: VLSB_FAULT",
+ "ERR 11: ILLEGAL_MEM_ADDR",
+ "ERR 12: ILLEGAL_MEM_SEL",
+ "ERR 13: ILLEGAL_CONTEXT_ID",
+ "ERR 15: 0xF Reserved",
+ "ERR 18: CMD_TIMEOUT",
+ "ERR 19: IDMA0_AXI_SLVERR",
+ "ERR 20: IDMA0_AXI_DECERR",
+ "ERR 21: 0x15 Reserved",
+ "ERR 22: IDMA1_AXI_SLAVE_FAULT",
+	"ERR 23: IDMA1_AXI_DECERR",
+ "ERR 24: 0x18 Reserved",
+ "ERR 27: 0x1B Reserved",
+ "ERR 38: ODMA0_AXI_SLVERR",
+ "ERR 39: ODMA0_AXI_DECERR",
+ "ERR 40: 0x28 Reserved",
+ "ERR 41: ODMA1_AXI_SLVERR",
+ "ERR 42: ODMA1_AXI_DECERR",
+ "ERR 43: LSB_PARITY_ERR",
+};
+
+static void pt_log_error(struct pt_device *d, int e)
+{
+ dev_err(d->dev, "PTDMA error: %s (0x%x)\n", pt_error_codes[e], e);
+}
+
+void pt_start_queue(struct pt_cmd_queue *cmd_q)
+{
+ /* Turn on the run bit */
+ iowrite32(cmd_q->qcontrol | CMD_Q_RUN, cmd_q->reg_control);
+}
+
+void pt_stop_queue(struct pt_cmd_queue *cmd_q)
+{
+ /* Turn off the run bit */
+ iowrite32(cmd_q->qcontrol & ~CMD_Q_RUN, cmd_q->reg_control);
+}
+
+static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
+{
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
+ u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
+ u32 tail;
+
+	if (soc) {
+		/* Convert a stop-on-completion request into interrupt-on-completion */
+		desc->dw0 |= DWORD0_IOC;
+		desc->dw0 &= ~DWORD0_SOC;
+	}
+ mutex_lock(&cmd_q->q_mutex);
+
+ /* Copy 32-byte command descriptor to hw queue. */
+ memcpy(q_desc, desc, 32);
+ cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;
+
+ /* The data used by this command must be flushed to memory */
+ wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ iowrite32(tail, cmd_q->reg_control + 0x0004);
+
+ /* Turn the queue back on using our cached control register */
+ pt_start_queue(cmd_q);
+ mutex_unlock(&cmd_q->q_mutex);
+
+ return 0;
+}
+
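
pt_core_execute_cmd() treats the queue as a ring of CMD_Q_LEN fixed-size descriptors: each submission copies one 32-byte descriptor at qidx, advances qidx modulo the ring length, and rewrites the tail register relative to the ring base. A worked sketch of the tail arithmetic under the constants defined in ptdma.h (CMD_Q_LEN = 32, Q_DESC_SIZE = 32 bytes):

	/*
	 * qidx 0  -> tail = qdma_tail + 0x000
	 * qidx 31 -> tail = qdma_tail + 0x3e0
	 * qidx wraps 31 -> 0, so the tail walks the same 1 KiB window forever
	 */
	tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
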
+int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine)
+{
+ struct ptdma_desc desc;
+
+ cmd_q->cmd_error = 0;
+ cmd_q->total_pt_ops++;
+ memset(&desc, 0, sizeof(desc));
+ desc.dw0 = CMD_DESC_DW0_VAL;
+ desc.length = pt_engine->src_len;
+ desc.src_lo = lower_32_bits(pt_engine->src_dma);
+ desc.dw3.src_hi = upper_32_bits(pt_engine->src_dma);
+ desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
+ desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
+
+ return pt_core_execute_cmd(&desc, cmd_q);
+}
+
+static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(0, pt->cmd_q.reg_control + 0x000C);
+}
+
+static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+}
+
+static void pt_do_cmd_complete(unsigned long data)
+{
+ struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
+ struct pt_cmd *cmd = tdata->cmd;
+ struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
+ u32 tail;
+
+ if (cmd_q->cmd_error) {
+ /*
+ * Log the error and flush the queue by
+ * moving the head pointer
+ */
+ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ pt_log_error(cmd_q->pt, cmd_q->cmd_error);
+ iowrite32(tail, cmd_q->reg_control + 0x0008);
+ }
+
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+}
+
+static irqreturn_t pt_core_irq_handler(int irq, void *data)
+{
+ struct pt_device *pt = data;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ u32 status;
+
+ pt_core_disable_queue_interrupts(pt);
+ pt->total_interrupts++;
+ status = ioread32(cmd_q->reg_control + 0x0010);
+ if (status) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_control + 0x0100);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_control + 0x0104);
+
+ /* On error, only save the first error value */
+ if ((status & INT_ERROR) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+		/* Acknowledge the interrupt */
+		iowrite32(status, cmd_q->reg_control + 0x0010);
+		pt_core_enable_queue_interrupts(pt);
+		pt_do_cmd_complete((ulong)&pt->tdata);
+	} else {
+		/* Spurious interrupt: unmask again or the queue stays silent */
+		pt_core_enable_queue_interrupts(pt);
+	}
+ return IRQ_HANDLED;
+}
+
+int pt_core_init(struct pt_device *pt)
+{
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ u32 dma_addr_lo, dma_addr_hi;
+ struct device *dev = pt->dev;
+ struct dma_pool *dma_pool;
+ int ret;
+
+ /* Allocate a dma pool for the queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q", dev_name(pt->dev));
+
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ PT_DMAPOOL_MAX_SIZE,
+ PT_DMAPOOL_ALIGN, 0);
+ if (!dma_pool)
+ return -ENOMEM;
+
+ /* ptdma core initialisation */
+ iowrite32(CMD_CONFIG_VHB_EN, pt->io_regs + CMD_CONFIG_OFFSET);
+ iowrite32(CMD_QUEUE_PRIO, pt->io_regs + CMD_QUEUE_PRIO_OFFSET);
+ iowrite32(CMD_TIMEOUT_DISABLE, pt->io_regs + CMD_TIMEOUT_OFFSET);
+ iowrite32(CMD_CLK_GATE_CONFIG, pt->io_regs + CMD_CLK_GATE_CTL_OFFSET);
+ iowrite32(CMD_CONFIG_REQID, pt->io_regs + CMD_REQID_CONFIG_OFFSET);
+
+ cmd_q->pt = pt;
+ cmd_q->dma_pool = dma_pool;
+ mutex_init(&cmd_q->q_mutex);
+
+ /* Page alignment satisfies our needs for N <= 128 */
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+ cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+ &cmd_q->qbase_dma,
+ GFP_KERNEL);
+	if (!cmd_q->qbase) {
+		dev_err(dev, "unable to allocate command queue\n");
+		ret = -ENOMEM;
+		goto e_pool;
+	}
+
+ cmd_q->qidx = 0;
+
+ /* Preset some register values */
+ cmd_q->reg_control = pt->io_regs + CMD_Q_STATUS_INCR;
+
+ /* Turn off the queues and disable interrupts until ready */
+ pt_core_disable_queue_interrupts(pt);
+
+ cmd_q->qcontrol = 0; /* Start with nothing */
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+ ioread32(cmd_q->reg_control + 0x0104);
+ ioread32(cmd_q->reg_control + 0x0100);
+
+ /* Clear the interrupt status */
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
+	/* Request an irq */
+	ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
+	if (ret) {
+		dev_err(dev, "unable to allocate an IRQ\n");
+		goto e_dma_alloc;
+	}
+
+ /* Update the device registers with queue information. */
+ cmd_q->qcontrol &= ~CMD_Q_SIZE;
+ cmd_q->qcontrol |= FIELD_PREP(CMD_Q_SIZE, QUEUE_SIZE_VAL);
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ dma_addr_lo = lower_32_bits(cmd_q->qdma_tail);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0004);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0008);
+
+ dma_addr_hi = upper_32_bits(cmd_q->qdma_tail);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+ pt_core_enable_queue_interrupts(pt);
+
+ /* Register the DMA engine support */
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ goto e_dmaengine;
+
+ /* Set up debugfs entries */
+ ptdma_debugfs_setup(pt);
+
+ return 0;
+
+e_dmaengine:
+ free_irq(pt->pt_irq, pt);
+
+e_dma_alloc:
+ dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
+
+e_pool:
+	dma_pool_destroy(pt->cmd_q.dma_pool);
+
+ return ret;
+}
+
+void pt_core_destroy(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ struct pt_cmd *cmd;
+
+ /* Unregister the DMA engine */
+ pt_dmaengine_unregister(pt);
+
+ /* Disable and clear interrupts */
+ pt_core_disable_queue_interrupts(pt);
+
+ /* Turn off the run bit */
+ pt_stop_queue(cmd_q);
+
+ /* Clear the interrupt status */
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+ ioread32(cmd_q->reg_control + 0x0104);
+ ioread32(cmd_q->reg_control + 0x0100);
+
+ free_irq(pt->pt_irq, pt);
+
+ dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
+ cmd_q->qbase_dma);
+
+ /* Flush the cmd queue */
+ while (!list_empty(&pt->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&pt->cmd, struct pt_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->pt_cmd_callback(cmd->data, -ENODEV);
+ }
+}
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
new file mode 100644
index 000000000000..c9e52f6f2f50
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include "ptdma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
+{
+ return container_of(dma_chan, struct pt_dma_chan, vc.chan);
+}
+
+static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct pt_dma_desc, vd);
+}
+
+static void pt_free_chan_resources(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_free_chan_resources(&chan->vc);
+}
+
+static void pt_synchronize(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static void pt_do_cleanup(struct virt_dma_desc *vd)
+{
+ struct pt_dma_desc *desc = to_pt_desc(vd);
+ struct pt_device *pt = desc->pt;
+
+ kmem_cache_free(pt->dma_desc_cache, desc);
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc)
+{
+ struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt;
+ struct pt_cmd *pt_cmd;
+ struct pt_cmd_queue *cmd_q;
+
+ desc->issued_to_hw = 1;
+
+ pt_cmd = &desc->pt_cmd;
+ pt = pt_cmd->pt;
+ cmd_q = &pt->cmd_q;
+ pt_engine = &pt_cmd->passthru;
+
+ pt->tdata.cmd = pt_cmd;
+
+ /* Execute the command */
+ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+
+ return 0;
+}
+
+static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
+{
+ /* Get the next DMA descriptor on the active list */
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ return vd ? to_pt_desc(vd) : NULL;
+}
+
+static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
+ struct pt_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+
+ /* Loop over descriptors until one is found with commands */
+ do {
+ if (desc) {
+ if (!desc->issued_to_hw) {
+ /* No errors, keep going */
+ if (desc->status != DMA_ERROR)
+ return desc;
+ }
+
+ tx_desc = &desc->vd.tx;
+ vd = &desc->vd;
+ } else {
+ tx_desc = NULL;
+ }
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (desc) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ list_del(&desc->vd.node);
+ }
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ if (tx_desc) {
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+ dma_run_dependencies(tx_desc);
+ vchan_vdesc_fini(vd);
+ }
+ } while (desc);
+
+ return NULL;
+}
+
+static void pt_cmd_callback(void *data, int err)
+{
+ struct pt_dma_desc *desc = data;
+ struct dma_chan *dma_chan;
+ struct pt_dma_chan *chan;
+ int ret;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ dma_chan = desc->vd.tx.chan;
+ chan = to_pt_chan(dma_chan);
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ while (true) {
+ /* Check for DMA descriptor completion */
+ desc = pt_handle_active_desc(chan, desc);
+
+ /* Don't submit cmd if no descriptor or DMA is paused */
+ if (!desc)
+ break;
+
+ ret = pt_dma_start_desc(desc);
+ if (!ret)
+ break;
+
+ desc->status = DMA_ERROR;
+ }
+}
+
+static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
+ unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+ desc->pt = chan->pt;
+ desc->issued_to_hw = 0;
+ desc->status = DMA_IN_PROGRESS;
+
+ return desc;
+}
+
+static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ unsigned int len,
+ unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_passthru_engine *pt_engine;
+ struct pt_dma_desc *desc;
+ struct pt_cmd *pt_cmd;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ pt_cmd = &desc->pt_cmd;
+ pt_cmd->pt = chan->pt;
+ pt_engine = &pt_cmd->passthru;
+ pt_cmd->engine = PT_ENGINE_PASSTHRU;
+ pt_engine->src_dma = src;
+ pt_engine->dst_dma = dst;
+ pt_engine->src_len = len;
+ pt_cmd->pt_cmd_callback = pt_cmd_callback;
+ pt_cmd->data = desc;
+
+ desc->len = len;
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = pt_create_desc(dma_chan, dst, src, len, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static void pt_issue_pending(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ vchan_issue_pending(&chan->vc);
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was nothing active, start processing */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+}
+
+static int pt_pause(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ pt_stop_queue(&chan->pt->cmd_q);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return 0;
+}
+
+static int pt_resume(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ pt_start_queue(&chan->pt->cmd_q);
+ desc = pt_next_dma_desc(chan);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was something active, re-start */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+
+ return 0;
+}
+
+static int pt_terminate_all(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ vchan_get_all_descriptors(&chan->vc, &head);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+ vchan_free_chan_resources(&chan->vc);
+
+ return 0;
+}
+
+int pt_dmaengine_register(struct pt_device *pt)
+{
+ struct pt_dma_chan *chan;
+ struct dma_device *dma_dev = &pt->dma_dev;
+ char *cmd_cache_name;
+ char *desc_cache_name;
+ int ret;
+
+ pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+ GFP_KERNEL);
+ if (!pt->pt_dma_chan)
+ return -ENOMEM;
+
+ cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-cmd-cache",
+ dev_name(pt->dev));
+	if (!cmd_cache_name)
+		return -ENOMEM;
+
+	pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
+					      sizeof(struct pt_cmd),
+					      sizeof(void *),
+					      SLAB_HWCACHE_ALIGN, NULL);
+	if (!pt->dma_cmd_cache)
+		return -ENOMEM;
+
+ desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-desc-cache",
+ dev_name(pt->dev));
+ if (!desc_cache_name) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
+ sizeof(struct pt_dma_desc), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pt->dma_desc_cache) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ dma_dev->dev = pt->dev;
+ dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->directions = DMA_MEM_TO_MEM;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+ /*
+ * PTDMA is intended to be used with the AMD NTB devices, hence
+ * marking it as DMA_PRIVATE.
+ */
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ chan = pt->pt_dma_chan;
+ chan->pt = pt;
+
+ /* Set base and prep routines */
+ dma_dev->device_free_chan_resources = pt_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
+ dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
+ dma_dev->device_issue_pending = pt_issue_pending;
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_pause = pt_pause;
+ dma_dev->device_resume = pt_resume;
+ dma_dev->device_terminate_all = pt_terminate_all;
+ dma_dev->device_synchronize = pt_synchronize;
+
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+
+ dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ kmem_cache_destroy(pt->dma_desc_cache);
+
+err_cache:
+ kmem_cache_destroy(pt->dma_cmd_cache);
+
+ return ret;
+}
+
+void pt_dmaengine_unregister(struct pt_device *pt)
+{
+ struct dma_device *dma_dev = &pt->dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ kmem_cache_destroy(pt->dma_desc_cache);
+ kmem_cache_destroy(pt->dma_cmd_cache);
+}
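
Because the channel is registered with DMA_PRIVATE, a consumer (in practice the AMD NTB transport) has to claim it through the dmaengine client API rather than the shared memcpy pool. A minimal, hypothetical client sketch using only stock dmaengine calls; the channel name, `dev`, `dst_dma`, `src_dma` and `len` are placeholders:

	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "ptdma");		/* name is illustrative */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				       DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* poll with dma_async_is_tx_complete(chan, cookie, NULL, NULL) */
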
diff --git a/drivers/dma/ptdma/ptdma-pci.c b/drivers/dma/ptdma/ptdma-pci.c
new file mode 100644
index 000000000000..22739ff0c3c5
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-pci.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include "ptdma.h"
+
+struct pt_msix {
+ int msix_count;
+ struct msix_entry msix_entry;
+};
+
+/*
+ * pt_alloc_struct - allocate and initialize the pt_device struct
+ *
+ * @dev: device struct of the PTDMA
+ */
+static struct pt_device *pt_alloc_struct(struct device *dev)
+{
+ struct pt_device *pt;
+
+ pt = devm_kzalloc(dev, sizeof(*pt), GFP_KERNEL);
+
+ if (!pt)
+ return NULL;
+ pt->dev = dev;
+
+ INIT_LIST_HEAD(&pt->cmd);
+
+ return pt;
+}
+
+static int pt_get_msix_irqs(struct pt_device *pt)
+{
+ struct pt_msix *pt_msix = pt->pt_msix;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pt_msix->msix_entry.entry = 0;
+
+ ret = pci_enable_msix_range(pdev, &pt_msix->msix_entry, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ pt_msix->msix_count = ret;
+
+ pt->pt_irq = pt_msix->msix_entry.vector;
+
+ return 0;
+}
+
+static int pt_get_msi_irq(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ return ret;
+
+ pt->pt_irq = pdev->irq;
+
+ return 0;
+}
+
+static int pt_get_irqs(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ int ret;
+
+ ret = pt_get_msix_irqs(pt);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI-X vectors, try MSI */
+ dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = pt_get_msi_irq(pt);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI interrupt */
+ dev_err(dev, "could not enable MSI (%d)\n", ret);
+
+ return ret;
+}
+
+static void pt_free_irqs(struct pt_device *pt)
+{
+ struct pt_msix *pt_msix = pt->pt_msix;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pt_msix->msix_count)
+ pci_disable_msix(pdev);
+ else if (pt->pt_irq)
+ pci_disable_msi(pdev);
+
+ pt->pt_irq = 0;
+}
+
+static int pt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct pt_device *pt;
+ struct pt_msix *pt_msix;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+ int bar_mask;
+ int ret = -ENOMEM;
+
+ pt = pt_alloc_struct(dev);
+ if (!pt)
+ goto e_err;
+
+ pt_msix = devm_kzalloc(dev, sizeof(*pt_msix), GFP_KERNEL);
+ if (!pt_msix)
+ goto e_err;
+
+ pt->pt_msix = pt_msix;
+ pt->dev_vdata = (struct pt_dev_vdata *)id->driver_data;
+ if (!pt->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ptdma");
+ if (ret) {
+ dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ iomap_table = pcim_iomap_table(pdev);
+ if (!iomap_table) {
+ dev_err(dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ pt->io_regs = iomap_table[pt->dev_vdata->bar];
+ if (!pt->io_regs) {
+ dev_err(dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ ret = pt_get_irqs(pt);
+ if (ret)
+ goto e_err;
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
+ ret);
+ goto e_err;
+ }
+ }
+
+ dev_set_drvdata(dev, pt);
+
+ if (pt->dev_vdata)
+ ret = pt_core_init(pt);
+
+ if (ret)
+ goto e_err;
+
+ return 0;
+
+e_err:
+ dev_err(dev, "initialization failed ret = %d\n", ret);
+
+ return ret;
+}
+
+static void pt_pci_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pt_device *pt = dev_get_drvdata(dev);
+
+ if (!pt)
+ return;
+
+ if (pt->dev_vdata)
+ pt_core_destroy(pt);
+
+ pt_free_irqs(pt);
+}
+
+static const struct pt_dev_vdata dev_vdata[] = {
+ {
+ .bar = 2,
+ },
+};
+
+static const struct pci_device_id pt_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x1498), (kernel_ulong_t)&dev_vdata[0] },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pt_pci_table);
+
+static struct pci_driver pt_pci_driver = {
+ .name = "ptdma",
+ .id_table = pt_pci_table,
+ .probe = pt_pci_probe,
+ .remove = pt_pci_remove,
+};
+
+module_pci_driver(pt_pci_driver);
+
+MODULE_AUTHOR("Sanjay R Mehta <sanju.mehta@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD PassThru DMA driver");
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
new file mode 100644
index 000000000000..afbf192c9230
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#ifndef __PT_DEV_H__
+#define __PT_DEV_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+
+#include "../virt-dma.h"
+
+#define MAX_PT_NAME_LEN 16
+#define MAX_DMAPOOL_NAME_LEN 32
+
+#define MAX_HW_QUEUES 1
+#define MAX_CMD_QLEN 100
+
+#define PT_ENGINE_PASSTHRU 5
+
+/* Register Mappings */
+#define IRQ_MASK_REG 0x040
+#define IRQ_STATUS_REG 0x200
+
+#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f)
+
+#define CMD_QUEUE_PRIO_OFFSET 0x00
+#define CMD_REQID_CONFIG_OFFSET 0x04
+#define CMD_TIMEOUT_OFFSET 0x08
+#define CMD_PT_VERSION 0x10
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+#define CMD_Q_DMA_STATUS_BASE 0x0108
+#define CMD_Q_DMA_READ_STATUS_BASE 0x010C
+#define CMD_Q_DMA_WRITE_STATUS_BASE 0x0110
+#define CMD_Q_ABORT_BASE 0x0114
+#define CMD_Q_AX_CACHE_BASE 0x0118
+
+#define CMD_CONFIG_OFFSET 0x1120
+#define CMD_CLK_GATE_CTL_OFFSET 0x6004
+
+#define CMD_DESC_DW0_VAL 0x500012
+
+/* Address offset for virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_CONFIG_REQID 0
+#define CMD_TIMEOUT_DISABLE 0
+#define CMD_CLK_DYN_GATING_DIS 0
+#define CMD_CLK_SW_GATE_MODE 0
+#define CMD_CLK_GATE_CTL 0
+#define CMD_QUEUE_PRIO GENMASK(2, 1)
+#define CMD_CONFIG_VHB_EN BIT(0)
+#define CMD_CLK_DYN_GATING_EN BIT(0)
+#define CMD_CLK_HW_GATE_MODE BIT(0)
+#define CMD_CLK_GATE_ON_DELAY BIT(12)
+#define CMD_CLK_GATE_OFF_DELAY BIT(12)
+
+#define CMD_CLK_GATE_CONFIG (CMD_CLK_GATE_CTL | \
+ CMD_CLK_HW_GATE_MODE | \
+ CMD_CLK_GATE_ON_DELAY | \
+ CMD_CLK_DYN_GATING_EN | \
+ CMD_CLK_GATE_OFF_DELAY)
+
+#define CMD_Q_LEN 32
+#define CMD_Q_RUN BIT(0)
+#define CMD_Q_HALT BIT(1)
+#define CMD_Q_MEM_LOCATION BIT(2)
+#define CMD_Q_SIZE_MASK GENMASK(4, 0)
+#define CMD_Q_SIZE GENMASK(7, 3)
+#define CMD_Q_SHIFT GENMASK(1, 0)
+#define QUEUE_SIZE_VAL ((ffs(CMD_Q_LEN) - 2) & \
+ CMD_Q_SIZE_MASK)
+#define Q_PTR_MASK		((2 << (QUEUE_SIZE_VAL + 5)) - 1)
+#define Q_DESC_SIZE sizeof(struct ptdma_desc)
+#define Q_SIZE(n) (CMD_Q_LEN * (n))
+
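
The queue-size encoding above is easier to audit with the numbers plugged in; a worked check for CMD_Q_LEN = 32:

	/*
	 * ffs(32) = 6, so QUEUE_SIZE_VAL = (6 - 2) & 0x1f = 4, consistent
	 * with the hardware encoding a 2^(val + 1) = 32-entry ring.
	 * Q_SIZE(Q_DESC_SIZE) = 32 * 32 = 1024 bytes of descriptor memory,
	 * and Q_PTR_MASK = (2 << 9) - 1 = 0x3ff spans exactly that window.
	 */
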
+#define INT_COMPLETION BIT(0)
+#define INT_ERROR BIT(1)
+#define INT_QUEUE_STOPPED BIT(2)
+#define INT_EMPTY_QUEUE BIT(3)
+#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
+
+/****** Local Storage Block ******/
+#define LSB_START 0
+#define LSB_END 127
+#define LSB_COUNT (LSB_END - LSB_START + 1)
+
+#define PT_DMAPOOL_MAX_SIZE 64
+#define PT_DMAPOOL_ALIGN BIT(5)
+
+#define PT_PASSTHRU_BLOCKSIZE 512
+
+struct pt_device;
+
+struct pt_tasklet_data {
+ struct completion completion;
+ struct pt_cmd *cmd;
+};
+
+/*
+ * struct pt_passthru_engine - pass-through operation
+ * without performing DMA mapping
+ * @mask: mask to be applied to data
+ * @mask_len: length in bytes of mask
+ * @src_dma: data to be used for this operation
+ * @dst_dma: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ *
+ * Variables required to be set when calling pt_enqueue_cmd():
+ * - bit_mod, byte_swap, src, dst, src_len
+ * - mask, mask_len if bit_mod is not PT_PASSTHRU_BITWISE_NOOP
+ */
+struct pt_passthru_engine {
+ dma_addr_t mask;
+ u32 mask_len; /* In bytes */
+
+ dma_addr_t src_dma, dst_dma;
+ u64 src_len; /* In bytes */
+};
+
+/*
+ * struct pt_cmd - PTDMA operation request
+ * @entry: list element
+ * @work: work element used for callbacks
+ * @pt: PT device to be run on
+ * @ret: operation return code
+ * @flags: cmd processing flags
+ * @engine: PTDMA operation to perform (passthru)
+ * @engine_error: PT engine return code
+ * @passthru: engine specific structures, refer to specific engine struct below
+ * @callback: operation completion callback function
+ * @data: parameter value to be supplied to the callback function
+ *
+ * Variables required to be set when calling pt_enqueue_cmd():
+ * - engine, callback
+ * - See the operation structures below for what is required for each
+ * operation.
+ */
+struct pt_cmd {
+ struct list_head entry;
+ struct work_struct work;
+ struct pt_device *pt;
+ int ret;
+ u32 engine;
+ u32 engine_error;
+ struct pt_passthru_engine passthru;
+ /* Completion callback support */
+ void (*pt_cmd_callback)(void *data, int err);
+ void *data;
+};
+
+struct pt_dma_desc {
+ struct virt_dma_desc vd;
+ struct pt_device *pt;
+ enum dma_status status;
+ size_t len;
+ bool issued_to_hw;
+ struct pt_cmd pt_cmd;
+};
+
+struct pt_dma_chan {
+ struct virt_dma_chan vc;
+ struct pt_device *pt;
+};
+
+struct pt_cmd_queue {
+ struct pt_device *pt;
+
+ /* Queue dma pool */
+ struct dma_pool *dma_pool;
+
+	/* Queue base address (not necessarily aligned) */
+ struct ptdma_desc *qbase;
+
+ /* Aligned queue start address (per requirement) */
+ struct mutex q_mutex ____cacheline_aligned;
+ unsigned int qidx;
+
+ unsigned int qsize;
+ dma_addr_t qbase_dma;
+ dma_addr_t qdma_tail;
+
+ unsigned int active;
+ unsigned int suspended;
+
+ /* Register addresses for queue */
+ void __iomem *reg_control;
+ u32 qcontrol; /* Cached control register */
+
+ /* Status values from job */
+ u32 int_status;
+ u32 q_status;
+ u32 q_int_status;
+ u32 cmd_error;
+ /* Queue Statistics */
+ unsigned long total_pt_ops;
+} ____cacheline_aligned;
+
+struct pt_device {
+ struct list_head entry;
+
+ unsigned int ord;
+ char name[MAX_PT_NAME_LEN];
+
+ struct device *dev;
+
+ /* Bus specific device information */
+ struct pt_msix *pt_msix;
+
+ struct pt_dev_vdata *dev_vdata;
+
+ unsigned int pt_irq;
+
+ /* I/O area used for device communication */
+ void __iomem *io_regs;
+
+ spinlock_t cmd_lock ____cacheline_aligned;
+ unsigned int cmd_count;
+ struct list_head cmd;
+
+	/*
+	 * The command queue. This represents the single queue available
+	 * on the PTDMA for processing cmds.
+	 */
+ struct pt_cmd_queue cmd_q;
+
+ /* Support for the DMA Engine capabilities */
+ struct dma_device dma_dev;
+ struct pt_dma_chan *pt_dma_chan;
+ struct kmem_cache *dma_cmd_cache;
+ struct kmem_cache *dma_desc_cache;
+
+ wait_queue_head_t lsb_queue;
+
+ /* Device Statistics */
+ unsigned long total_interrupts;
+
+ struct pt_tasklet_data tdata;
+};
+
+/*
+ * descriptor for PTDMA commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: reserved 32 bits
+ * word 7: reserved 32 bits
+ */
+
+#define DWORD0_SOC BIT(0)
+#define DWORD0_IOC BIT(1)
+
+struct dword3 {
+ unsigned int src_hi:16;
+ unsigned int src_mem:2;
+ unsigned int lsb_cxt_id:8;
+ unsigned int rsvd1:5;
+ unsigned int fixed:1;
+};
+
+struct dword5 {
+ unsigned int dst_hi:16;
+ unsigned int dst_mem:2;
+ unsigned int rsvd1:13;
+ unsigned int fixed:1;
+};
+
+struct ptdma_desc {
+ u32 dw0;
+ u32 length;
+ u32 src_lo;
+ struct dword3 dw3;
+ u32 dst_lo;
+ struct dword5 dw5;
+ __le32 rsvd1;
+ __le32 rsvd2;
+};
+
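
The layout comment above pins the descriptor to eight 32-bit words, and the bitfields in dword3/dword5 must each pack into one of them. A compile-time guard makes that assumption checkable; a sketch using the kernel's static_assert from <linux/build_bug.h>:

	static_assert(sizeof(struct dword3) == sizeof(u32));
	static_assert(sizeof(struct dword5) == sizeof(u32));
	static_assert(sizeof(struct ptdma_desc) == 8 * sizeof(u32));
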
+/* Structure to hold PT device data */
+struct pt_dev_vdata {
+ const unsigned int bar;
+};
+
+int pt_dmaengine_register(struct pt_device *pt);
+void pt_dmaengine_unregister(struct pt_device *pt);
+
+void ptdma_debugfs_setup(struct pt_device *pt);
+int pt_core_init(struct pt_device *pt);
+void pt_core_destroy(struct pt_device *pt);
+
+int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine);
+
+void pt_start_queue(struct pt_cmd_queue *cmd_q);
+void pt_stop_queue(struct pt_cmd_queue *cmd_q);
+
+#endif
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 13437323a85b..a46296285307 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -47,3 +47,12 @@ config RENESAS_USB_DMAC
help
This driver supports the USB-DMA controller found in the Renesas
SoCs.
+
+config RZ_DMAC
+ tristate "Renesas RZ/G2L DMA Controller"
+ depends on ARCH_R9A07G044 || COMPILE_TEST
+ select RENESAS_DMA
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This driver supports the general purpose DMA controller found in the
+ Renesas RZ/G2L SoC variants.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index abdf10341725..360ab6d25e76 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
+obj-$(CONFIG_RZ_DMAC) += rz-dmac.o
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
new file mode 100644
index 000000000000..f9f30cbeccbe
--- /dev/null
+++ b/drivers/dma/sh/rz-dmac.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L DMA Controller Driver
+ *
+ * Based on imx-dma.c
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+enum rz_dmac_prep_type {
+ RZ_DMAC_DESC_MEMCPY,
+ RZ_DMAC_DESC_SLAVE_SG,
+};
+
+struct rz_lmdesc {
+ u32 header;
+ u32 sa;
+ u32 da;
+ u32 tb;
+ u32 chcfg;
+ u32 chitvl;
+ u32 chext;
+ u32 nxla;
+};
+
+struct rz_dmac_desc {
+ struct virt_dma_desc vd;
+ dma_addr_t src;
+ dma_addr_t dest;
+ size_t len;
+ struct list_head node;
+ enum dma_transfer_direction direction;
+ enum rz_dmac_prep_type type;
+ /* For slave sg */
+ struct scatterlist *sg;
+ unsigned int sgcount;
+};
+
+#define to_rz_dmac_desc(d) container_of(d, struct rz_dmac_desc, vd)
+
+struct rz_dmac_chan {
+ struct virt_dma_chan vc;
+ void __iomem *ch_base;
+ void __iomem *ch_cmn_base;
+ unsigned int index;
+ int irq;
+ struct rz_dmac_desc *desc;
+ int descs_allocated;
+
+ enum dma_slave_buswidth src_word_size;
+ enum dma_slave_buswidth dst_word_size;
+ dma_addr_t src_per_address;
+ dma_addr_t dst_per_address;
+
+ u32 chcfg;
+ u32 chctrl;
+ int mid_rid;
+
+ struct list_head ld_free;
+ struct list_head ld_queue;
+ struct list_head ld_active;
+
+ struct {
+ struct rz_lmdesc *base;
+ struct rz_lmdesc *head;
+ struct rz_lmdesc *tail;
+ dma_addr_t base_dma;
+ } lmdesc;
+};
+
+#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)
+
+struct rz_dmac {
+ struct dma_device engine;
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *ext_base;
+
+ unsigned int n_channels;
+ struct rz_dmac_chan *channels;
+
+ DECLARE_BITMAP(modules, 1024);
+};
+
+#define to_rz_dmac(d) container_of(d, struct rz_dmac, engine)
+
+/*
+ * -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define CHSTAT 0x0024
+#define CHCTRL 0x0028
+#define CHCFG 0x002c
+#define NXLA 0x0038
+
+#define DCTRL 0x0000
+
+#define EACH_CHANNEL_OFFSET 0x0040
+#define CHANNEL_0_7_OFFSET 0x0000
+#define CHANNEL_0_7_COMMON_BASE 0x0300
+#define CHANNEL_8_15_OFFSET 0x0400
+#define CHANNEL_8_15_COMMON_BASE 0x0700
+
+#define CHSTAT_ER BIT(4)
+#define CHSTAT_EN BIT(0)
+
+#define CHCTRL_CLRINTMSK BIT(17)
+#define CHCTRL_CLRSUS BIT(9)
+#define CHCTRL_CLRTC BIT(6)
+#define CHCTRL_CLREND BIT(5)
+#define CHCTRL_CLRRQ BIT(4)
+#define CHCTRL_SWRST BIT(3)
+#define CHCTRL_STG BIT(2)
+#define CHCTRL_CLREN BIT(1)
+#define CHCTRL_SETEN BIT(0)
+#define CHCTRL_DEFAULT (CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
+ CHCTRL_CLRTC | CHCTRL_CLREND | \
+ CHCTRL_CLRRQ | CHCTRL_SWRST | \
+ CHCTRL_CLREN)
+
+#define CHCFG_DMS BIT(31)
+#define CHCFG_DEM BIT(24)
+#define CHCFG_DAD BIT(21)
+#define CHCFG_SAD BIT(20)
+#define CHCFG_REQD BIT(3)
+#define CHCFG_SEL(bits) ((bits) & 0x07)
+#define CHCFG_MEM_COPY (0x80400008)
+#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16))
+#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12))
+#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
+#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
+#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
+#define CHCFG_FILL_HIEN(a) (((a) & BIT(0)) << 5)
+
+#define MID_RID_MASK GENMASK(9, 0)
+#define CHCFG_MASK GENMASK(15, 10)
+#define CHCFG_DS_INVALID 0xFF
+#define DCTRL_LVINT BIT(1)
+#define DCTRL_PR BIT(0)
+#define DCTRL_DEFAULT (DCTRL_LVINT | DCTRL_PR)
+
+/* LINK MODE DESCRIPTOR */
+#define HEADER_LV BIT(0)
+
+#define RZ_DMAC_MAX_CHAN_DESCRIPTORS 16
+#define RZ_DMAC_MAX_CHANNELS 16
+#define DMAC_NR_LMDESC 64
+
+/*
+ * -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
+ unsigned int offset)
+{
+ writel(val, dmac->base + offset);
+}
+
+static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
+ unsigned int offset)
+{
+ writel(val, dmac->ext_base + offset);
+}
+
+static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
+{
+ return readl(dmac->ext_base + offset);
+}
+
+static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
+ unsigned int offset, int which)
+{
+ if (which)
+ writel(val, channel->ch_base + offset);
+ else
+ writel(val, channel->ch_cmn_base + offset);
+}
+
+static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
+ unsigned int offset, int which)
+{
+ if (which)
+ return readl(channel->ch_base + offset);
+ else
+ return readl(channel->ch_cmn_base + offset);
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
+ struct rz_lmdesc *lmdesc)
+{
+ u32 nxla;
+
+ channel->lmdesc.base = lmdesc;
+ channel->lmdesc.head = lmdesc;
+ channel->lmdesc.tail = lmdesc;
+ nxla = channel->lmdesc.base_dma;
+ while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
+ lmdesc->header = 0;
+ nxla += sizeof(*lmdesc);
+ lmdesc->nxla = nxla;
+ lmdesc++;
+ }
+
+ lmdesc->header = 0;
+ lmdesc->nxla = channel->lmdesc.base_dma;
+}
+
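
rz_lmdesc_setup() links the DMAC_NR_LMDESC descriptors into a hardware-walked ring through their nxla (next link address) fields, with the last entry pointing back at base_dma. The restart address computed later in rz_dmac_enable_hw() is simply the DMA address of the current head; a worked sketch:

	/*
	 * head at index k lives at base_dma + k * sizeof(struct rz_lmdesc);
	 * with 32-byte descriptors, k = 3 -> base_dma + 0x60.
	 */
	nxla = channel->lmdesc.base_dma +
	       sizeof(struct rz_lmdesc) * (channel->lmdesc.head - channel->lmdesc.base);
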
+/*
+ * -----------------------------------------------------------------------------
+ * Descriptors preparation
+ */
+
+static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
+{
+ struct rz_lmdesc *lmdesc = channel->lmdesc.head;
+
+ while (!(lmdesc->header & HEADER_LV)) {
+ lmdesc->header = 0;
+ lmdesc++;
+ if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
+ lmdesc = channel->lmdesc.base;
+ }
+ channel->lmdesc.head = lmdesc;
+}
+
+static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ unsigned long flags;
+ u32 nxla;
+ u32 chctrl;
+ u32 chstat;
+
+ dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
+
+ local_irq_save(flags);
+
+ rz_dmac_lmdesc_recycle(channel);
+
+ nxla = channel->lmdesc.base_dma +
+ (sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
+ channel->lmdesc.base));
+
+ chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
+ if (!(chstat & CHSTAT_EN)) {
+ chctrl = (channel->chctrl | CHCTRL_SETEN);
+ rz_dmac_ch_writel(channel, nxla, NXLA, 1);
+ rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
+ rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
+ rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
+ }
+
+ local_irq_restore(flags);
+}
+
+static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ unsigned long flags;
+
+ dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
+
+ local_irq_save(flags);
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+ local_irq_restore(flags);
+}
+
+static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
+{
+ u32 dmars_offset = (nr / 2) * 4;
+ u32 shift = (nr % 2) * 16;
+ u32 dmars32;
+
+ dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
+ dmars32 &= ~(0xffff << shift);
+ dmars32 |= dmars << shift;
+
+ rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
+}
+
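
Each 32-bit DMARS register packs two 16-bit per-channel request-select fields, which is exactly what the offset/shift arithmetic above selects before the read-modify-write. Worked for channel 5:

	/*
	 * nr = 5: dmars_offset = (5 / 2) * 4 = 8   (third DMARS register)
	 *         shift        = (5 % 2) * 16 = 16 (upper halfword)
	 * so only bits 31:16 of that register are replaced.
	 */
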
+static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
+ struct rz_dmac_desc *d = channel->desc;
+ u32 chcfg = CHCFG_MEM_COPY;
+
+ /* prepare descriptor */
+ lmdesc->sa = d->src;
+ lmdesc->da = d->dest;
+ lmdesc->tb = d->len;
+ lmdesc->chcfg = chcfg;
+ lmdesc->chitvl = 0;
+ lmdesc->chext = 0;
+ lmdesc->header = HEADER_LV;
+
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+
+ channel->chcfg = chcfg;
+ channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
+}
+
+static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *d = channel->desc;
+ struct scatterlist *sg, *sgl = d->sg;
+ struct rz_lmdesc *lmdesc;
+ unsigned int i, sg_len = d->sgcount;
+
+ channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;
+
+ if (d->direction == DMA_DEV_TO_MEM) {
+ channel->chcfg |= CHCFG_SAD;
+ channel->chcfg &= ~CHCFG_REQD;
+ } else {
+ channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
+ }
+
+ lmdesc = channel->lmdesc.tail;
+
+ for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
+ if (d->direction == DMA_DEV_TO_MEM) {
+ lmdesc->sa = channel->src_per_address;
+ lmdesc->da = sg_dma_address(sg);
+ } else {
+ lmdesc->sa = sg_dma_address(sg);
+ lmdesc->da = channel->dst_per_address;
+ }
+
+ lmdesc->tb = sg_dma_len(sg);
+ lmdesc->chitvl = 0;
+ lmdesc->chext = 0;
+ if (i == (sg_len - 1)) {
+ lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
+ lmdesc->header = HEADER_LV;
+ } else {
+ lmdesc->chcfg = channel->chcfg;
+ lmdesc->header = HEADER_LV;
+ }
+ if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
+ lmdesc = channel->lmdesc.base;
+ }
+
+ channel->lmdesc.tail = lmdesc;
+
+ rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ channel->chctrl = CHCTRL_SETEN;
+}
+
+static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
+{
+ struct rz_dmac_desc *d = chan->desc;
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd)
+ return 0;
+
+ list_del(&vd->node);
+
+ switch (d->type) {
+ case RZ_DMAC_DESC_MEMCPY:
+ rz_dmac_prepare_desc_for_memcpy(chan);
+ break;
+
+ case RZ_DMAC_DESC_SLAVE_SG:
+ rz_dmac_prepare_descs_for_slave_sg(chan);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ rz_dmac_enable_hw(chan);
+
+ return 0;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+
+ while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
+ struct rz_dmac_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ break;
+
+ list_add_tail(&desc->node, &channel->ld_free);
+ channel->descs_allocated++;
+ }
+
+ if (!channel->descs_allocated)
+ return -ENOMEM;
+
+ return channel->descs_allocated;
+}
+
+static void rz_dmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_lmdesc *lmdesc = channel->lmdesc.base;
+ struct rz_dmac_desc *desc, *_desc;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ for (i = 0; i < DMAC_NR_LMDESC; i++)
+ lmdesc[i].header = 0;
+
+ rz_dmac_disable_hw(channel);
+ list_splice_tail_init(&channel->ld_active, &channel->ld_free);
+ list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
+
+ if (channel->mid_rid >= 0) {
+ clear_bit(channel->mid_rid, dmac->modules);
+ channel->mid_rid = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
+ kfree(desc);
+ channel->descs_allocated--;
+ }
+
+ INIT_LIST_HEAD(&channel->ld_free);
+ vchan_free_chan_resources(&channel->vc);
+}
+
+static struct dma_async_tx_descriptor *
+rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *desc;
+
+ dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
+ __func__, channel->index, &src, &dest, len);
+
+ if (list_empty(&channel->ld_free))
+ return NULL;
+
+ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+
+ desc->type = RZ_DMAC_DESC_MEMCPY;
+ desc->src = src;
+ desc->dest = dest;
+ desc->len = len;
+ desc->direction = DMA_MEM_TO_MEM;
+
+ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac_desc *desc;
+ struct scatterlist *sg;
+ int dma_length = 0;
+ int i = 0;
+
+ if (list_empty(&channel->ld_free))
+ return NULL;
+
+ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_length += sg_dma_len(sg);
+ }
+
+ desc->type = RZ_DMAC_DESC_SLAVE_SG;
+ desc->sg = sgl;
+ desc->sgcount = sg_len;
+ desc->len = dma_length;
+ desc->direction = direction;
+
+ if (direction == DMA_DEV_TO_MEM)
+ desc->src = channel->src_per_address;
+ else
+ desc->dest = channel->dst_per_address;
+
+ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+}
+
+static int rz_dmac_terminate_all(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ rz_dmac_disable_hw(channel);
+ spin_lock_irqsave(&channel->vc.lock, flags);
+ list_splice_tail_init(&channel->ld_active, &channel->ld_free);
+ list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+ vchan_get_all_descriptors(&channel->vc, &head);
+ vchan_dma_desc_free_list(&channel->vc, &head);
+
+ return 0;
+}
+
+static void rz_dmac_issue_pending(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ if (!list_empty(&channel->ld_queue)) {
+ desc = list_first_entry(&channel->ld_queue,
+ struct rz_dmac_desc, node);
+ channel->desc = desc;
+ if (vchan_issue_pending(&channel->vc)) {
+ if (rz_dmac_xfer_desc(channel) < 0)
+ dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
+ channel->index);
+ else
+ list_move_tail(channel->ld_queue.next,
+ &channel->ld_active);
+ }
+ }
+
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+}
+
+static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
+{
+ u8 i;
+ const enum dma_slave_buswidth ds_lut[] = {
+ DMA_SLAVE_BUSWIDTH_1_BYTE,
+ DMA_SLAVE_BUSWIDTH_2_BYTES,
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_SLAVE_BUSWIDTH_8_BYTES,
+ DMA_SLAVE_BUSWIDTH_16_BYTES,
+ DMA_SLAVE_BUSWIDTH_32_BYTES,
+ DMA_SLAVE_BUSWIDTH_64_BYTES,
+ DMA_SLAVE_BUSWIDTH_128_BYTES,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
+ if (ds_lut[i] == ds)
+ return i;
+ }
+
+ return CHCFG_DS_INVALID;
+}
+
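
The LUT index returned above doubles as the raw DDS/SDS field value that rz_dmac_config() packs into CHCFG. A worked sketch for a 4-byte bus width:

	/* DMA_SLAVE_BUSWIDTH_4_BYTES sits at ds_lut index 2 */
	val = rz_dmac_ds_to_val_mapping(DMA_SLAVE_BUSWIDTH_4_BYTES);	/* 2 */
	chcfg |= CHCFG_FILL_DDS(val);	/* (2 << 16) & GENMASK(19, 16) == 0x20000 */
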
+static int rz_dmac_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ u32 val;
+
+ channel->src_per_address = config->src_addr;
+ channel->src_word_size = config->src_addr_width;
+ channel->dst_per_address = config->dst_addr;
+ channel->dst_word_size = config->dst_addr_width;
+
+ val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg |= CHCFG_FILL_DDS(val);
+
+ val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg |= CHCFG_FILL_SDS(val);
+
+ return 0;
+}
+
+static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
+{
+ /*
+ * Place holder
+ * Descriptor allocation is done during alloc_chan_resources and
+ * get freed during free_chan_resources.
+ * list is used to manage the descriptors and avoid any memory
+ * allocation/free during DMA read/write.
+ */
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ u32 chstat, chctrl;
+
+ chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
+ if (chstat & CHSTAT_ER) {
+ dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
+ channel->index, chstat);
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+ goto done;
+ }
+
+ chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
+ rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
+done:
+ return;
+}
+
+static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
+{
+ struct rz_dmac_chan *channel = dev_id;
+
+ if (channel) {
+ rz_dmac_irq_handle_channel(channel);
+ return IRQ_WAKE_THREAD;
+ }
+ /* handle DMAERR irq */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
+{
+ struct rz_dmac_chan *channel = dev_id;
+ struct rz_dmac_desc *desc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ if (list_empty(&channel->ld_active)) {
+ /* Someone might have called terminate all */
+ goto out;
+ }
+
+ desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
+ vchan_cookie_complete(&desc->vd);
+ list_move_tail(channel->ld_active.next, &channel->ld_free);
+ if (!list_empty(&channel->ld_queue)) {
+ desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
+ node);
+ channel->desc = desc;
+ if (rz_dmac_xfer_desc(channel) == 0)
+ list_move_tail(channel->ld_queue.next, &channel->ld_active);
+ }
+out:
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct of_phandle_args *dma_spec = arg;
+ u32 ch_cfg;
+
+ channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
+ ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
+ channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
+ CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);
+
+ return !test_and_set_bit(channel->mid_rid, dmac->modules);
+}
+
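
The single cell in the DT dma-spec carries both the MID/RID and the high-level channel configuration, split by MID_RID_MASK (bits 9:0) and CHCFG_MASK (bits 15:10). A hypothetical decode for an example cell value of 0x2801:

	u32 spec    = 0x2801;			/* hypothetical dma-spec cell */
	u32 mid_rid = spec & MID_RID_MASK;	/* bits 9:0  -> 0x001 */
	u32 ch_cfg  = (spec & CHCFG_MASK) >> 10;	/* bits 15:10 -> 0x0a */
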
+static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ dma_cap_mask_t mask;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int rz_dmac_chan_probe(struct rz_dmac *dmac,
+ struct rz_dmac_chan *channel,
+ unsigned int index)
+{
+ struct platform_device *pdev = to_platform_device(dmac->dev);
+ struct rz_lmdesc *lmdesc;
+ char pdev_irqname[5];
+ char *irqname;
+ int ret;
+
+ channel->index = index;
+ channel->mid_rid = -EINVAL;
+
+ /* Request the channel interrupt. */
+ sprintf(pdev_irqname, "ch%u", index);
+ channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (channel->irq < 0)
+ return channel->irq;
+
+ irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+ dev_name(dmac->dev), index);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dmac->dev, channel->irq,
+ rz_dmac_irq_handler,
+ rz_dmac_irq_handler_thread, 0,
+ irqname, channel);
+ if (ret) {
+ dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
+ channel->irq, ret);
+ return ret;
+ }
+
+ /* Set io base address for each channel */
+ if (index < 8) {
+ channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
+ EACH_CHANNEL_OFFSET * index;
+ channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
+ } else {
+ channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
+ EACH_CHANNEL_OFFSET * (index - 8);
+ channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
+ }
+
+ /* Allocate descriptors */
+ lmdesc = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ &channel->lmdesc.base_dma, GFP_KERNEL);
+ if (!lmdesc) {
+ dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
+ return -ENOMEM;
+ }
+ rz_lmdesc_setup(channel, lmdesc);
+
+ /* Initialize register for each channel */
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+
+ channel->vc.desc_free = rz_dmac_virt_desc_free;
+ vchan_init(&channel->vc, &dmac->engine);
+ INIT_LIST_HEAD(&channel->ld_queue);
+ INIT_LIST_HEAD(&channel->ld_free);
+ INIT_LIST_HEAD(&channel->ld_active);
+
+ return 0;
+}
+
+static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+ if (ret < 0) {
+ dev_err(dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
+ dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rz_dmac_probe(struct platform_device *pdev)
+{
+ const char *irqname = "error";
+ struct dma_device *engine;
+ struct rz_dmac *dmac;
+ int channel_num;
+ unsigned int i;
+ int ret;
+ int irq;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dmac);
+
+ ret = rz_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+ return ret;
+
+ dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+ sizeof(*dmac->channels), GFP_KERNEL);
+ if (!dmac->channels)
+ return -ENOMEM;
+
+ /* Request resources */
+ dmac->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dmac->base))
+ return PTR_ERR(dmac->base);
+
+ dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->ext_base))
+ return PTR_ERR(dmac->ext_base);
+
+ /* Register interrupt handler for error */
+ irq = platform_get_irq_byname(pdev, irqname);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
+ irqname, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
+ irq, ret);
+ return ret;
+ }
+
+ /* Initialize the channels. */
+ INIT_LIST_HEAD(&dmac->engine.channels);
+
+ for (i = 0; i < dmac->n_channels; i++) {
+ ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* Register the DMAC as a DMA provider for DT. */
+ ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
+ NULL);
+ if (ret < 0)
+ goto err;
+
+ /* Register the DMA engine device. */
+ engine = &dmac->engine;
+ dma_cap_set(DMA_SLAVE, engine->cap_mask);
+ dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+ rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
+ rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);
+
+ engine->dev = &pdev->dev;
+
+ engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
+ engine->device_free_chan_resources = rz_dmac_free_chan_resources;
+ engine->device_tx_status = dma_cookie_status;
+ engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
+ engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
+ engine->device_config = rz_dmac_config;
+ engine->device_terminate_all = rz_dmac_terminate_all;
+ engine->device_issue_pending = rz_dmac_issue_pending;
+
+ engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
+ dma_set_max_seg_size(engine->dev, U32_MAX);
+
+ ret = dma_async_device_register(engine);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register\n");
+ goto dma_register_err;
+ }
+ return 0;
+
+dma_register_err:
+ of_dma_controller_free(pdev->dev.of_node);
+err:
+	channel_num = i;
+ for (i = 0; i < channel_num; i++) {
+ struct rz_dmac_chan *channel = &dmac->channels[i];
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ channel->lmdesc.base,
+ channel->lmdesc.base_dma);
+ }
+
+ return ret;
+}
+
+static int rz_dmac_remove(struct platform_device *pdev)
+{
+ struct rz_dmac *dmac = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < dmac->n_channels; i++) {
+ struct rz_dmac_chan *channel = &dmac->channels[i];
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ channel->lmdesc.base,
+ channel->lmdesc.base_dma);
+ }
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&dmac->engine);
+
+ return 0;
+}
+
+static const struct of_device_id of_rz_dmac_match[] = {
+ { .compatible = "renesas,rz-dmac", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_rz_dmac_match);
+
+static struct platform_driver rz_dmac_driver = {
+ .driver = {
+ .name = "rz-dmac",
+ .of_match_table = of_rz_dmac_match,
+ },
+ .probe = rz_dmac_probe,
+ .remove = rz_dmac_remove,
+};
+
+module_platform_driver(rz_dmac_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 1cc06900153e..5edaeb89d1e6 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -466,7 +466,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
struct usb_dmac_desc *desc,
- int sg_index)
+ unsigned int sg_index)
{
struct usb_dmac_sg *sg = desc->sg + sg_index;
u32 mem_addr = sg->mem_addr & 0xffffffff;
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 0ef5ca81ba4d..4357d2395e6b 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1265,6 +1265,7 @@ static const struct of_device_id sprd_dma_match[] = {
{ .compatible = "sprd,sc9860-dma", },
{},
};
+MODULE_DEVICE_TABLE(of, sprd_dma_match);
static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 7dd1d3d0bf06..9063c727962e 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -60,6 +60,7 @@
#define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6)
+#define STM32_DMA_SCR_TRBUFF BIT(20) /* Bufferable transfer for USART/UART */
#define STM32_DMA_SCR_CT BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS BIT(15) /* Peripheral inc offset size */
@@ -138,8 +139,9 @@
#define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK BIT(2)
-#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) \
- >> 2)
+#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) >> 2)
+#define STM32_DMA_ALT_ACK_MODE_MASK BIT(4)
+#define STM32_DMA_ALT_ACK_MODE_GET(n) (((n) & STM32_DMA_ALT_ACK_MODE_MASK) >> 4)
enum stm32_dma_width {
STM32_DMA_BYTE,
@@ -1252,6 +1254,8 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
+ if (STM32_DMA_ALT_ACK_MODE_GET(cfg->features))
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 4735742e826d..b1115a6d1935 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -655,9 +655,8 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
return ret;
}
- ret = pm_runtime_get_sync(tdc2dev(tdc));
+ ret = pm_runtime_resume_and_get(tdc2dev(tdc));
if (ret < 0) {
- pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc);
return ret;
}
@@ -869,10 +868,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ if (ret < 0)
goto rpm_disable;
- }
ret = tegra_adma_init(tdma);
if (ret)
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
index 7580870ed746..34e3fc565a37 100644
--- a/drivers/dma/ti/k3-psil-j721e.c
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -58,6 +58,14 @@
}, \
}
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j721e_src_ep_map[] = {
/* SA2UL */
@@ -138,6 +146,71 @@ static struct psil_ep j721e_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
/* CPSW9 */
PSIL_ETHERNET(0x4a00),
/* CPSW0 */
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4b9530a7bf65..a4450bc95466 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1420,8 +1420,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
chan->desc_submitcount++;
chan->desc_pendingcount--;
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
+ list_move_tail(&desc->node, &chan->active_list);
if (chan->desc_submitcount == chan->num_frms)
chan->desc_submitcount = 0;
@@ -1659,6 +1658,17 @@ static void xilinx_dma_issue_pending(struct dma_chan *dchan)
}
/**
+ * xilinx_dma_device_config - Configure the DMA channel
+ * @dchan: DMA channel
+ * @config: channel configuration
+ */
+static int xilinx_dma_device_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ return 0;
+}
+
+/**
* xilinx_dma_complete_descriptor - Mark the active descriptor as complete
* @chan : xilinx DMA channel
*
@@ -3077,7 +3087,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
+ dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
@@ -3096,6 +3106,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_synchronize = xilinx_dma_synchronize;
xdev->common.device_tx_status = xilinx_dma_tx_status;
xdev->common.device_issue_pending = xilinx_dma_issue_pending;
+ xdev->common.device_config = xilinx_dma_device_config;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 5fecf5aa6e85..97f02f8eb03a 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -434,8 +434,7 @@ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
struct zynqmp_dma_desc_sw *child, *next;
chan->desc_free_cnt++;
- list_del(&sdesc->node);
- list_add_tail(&sdesc->node, &chan->free_list);
+ list_move_tail(&sdesc->node, &chan->free_list);
list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
chan->desc_free_cnt++;
list_move_tail(&child->node, &chan->free_list);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 715e491dfbc3..4c3fd2eed1da 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -488,9 +488,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
struct sk_buff *skb, u16 source_node_id,
bool is_broadcast, u16 ether_type)
{
- struct fwnet_device *dev;
int status;
- __be64 guid;
switch (ether_type) {
case ETH_P_ARP:
@@ -503,7 +501,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
goto err;
}
- dev = netdev_priv(net);
/* Write metadata, and then pass to the receive level */
skb->dev = net;
skb->ip_summed = CHECKSUM_NONE;
@@ -512,7 +509,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
* Parse the encapsulation header. This actually does the job of
* converting to an ethernet-like pseudo frame header.
*/
- guid = cpu_to_be64(dev->card->guid);
if (dev_hard_header(skb, net, ether_type,
is_broadcast ? net->broadcast : net->dev_addr,
NULL, skb->len) >= 0) {
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index ced1964faf42..2ee97bab7440 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1147,6 +1147,64 @@ int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+bool qcom_scm_lmh_dcvsh_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
+}
+EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);
+
+int qcom_scm_lmh_profile_change(u32 profile_id)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_LMH,
+ .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
+ .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
+ .args[0] = profile_id,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_lmh_profile_change);
+
+int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version)
+{
+ dma_addr_t payload_phys;
+ u32 *payload_buf;
+ int ret, payload_size = 5 * sizeof(u32);
+
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_LMH,
+ .cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
+ .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[1] = payload_size,
+ .args[2] = limit_node,
+ .args[3] = node_id,
+ .args[4] = version,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
+ if (!payload_buf)
+ return -ENOMEM;
+
+ payload_buf[0] = payload_fn;
+ payload_buf[1] = 0;
+ payload_buf[2] = payload_reg;
+ payload_buf[3] = 1;
+ payload_buf[4] = payload_val;
+
+ desc.args[0] = payload_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+ dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
+
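+
qcom_scm_lmh_dcvsh() marshals a five-word payload, {payload_fn, 0, payload_reg, 1, payload_val}, into a DMA-coherent buffer and passes the buffer's bus address as the first SCM argument. A hedged caller sketch; every constant below is a placeholder for illustration, not a real LMH payload function, node address, or protocol version:

static int demo_lmh_set_limit(void)
{
	if (!qcom_scm_lmh_dcvsh_available())
		return -ENODEV;

	/* fn, reg, val, limit node address, node id, version:
	 * placeholder values only.
	 */
	return qcom_scm_lmh_dcvsh(0x1, 0x0, 95000, 0x16e300, 0, 0x10000000);
}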
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
struct device_node *tcsr;
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 632fe3142462..d92156ceb3ac 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -114,6 +114,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE 0x01
+#define QCOM_SCM_SVC_LMH 0x13
+#define QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE 0x01
+#define QCOM_SCM_LMH_LIMIT_DCVSH 0x10
+
#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 8f53837d4d3e..97178b307ed6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -468,14 +468,18 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}
-/*
- * Helper function to query RAS EEPROM address
- *
- * @adev: amdgpu_device pointer
+/**
+ * amdgpu_atomfirmware_ras_rom_addr - Get the RAS EEPROM addr from VBIOS
+ * @adev: amdgpu_device pointer
+ * @i2c_address: pointer to u8; if not NULL, will contain
+ * the RAS EEPROM address if the function returns true
*
- * Return true if vbios supports ras rom address reporting
+ * Return true if VBIOS supports RAS EEPROM address reporting,
+ * else return false. If the function returns true and
+ * @i2c_address is not NULL, the RAS ROM address is stored
+ * in @i2c_address.
*/
-bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
+ u8 *i2c_address)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index;
@@ -483,27 +487,39 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_a
union firmware_info *firmware_info;
u8 frev, crev;
- if (i2c_address == NULL)
- return false;
-
- *i2c_address = 0;
-
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- firmwareinfo);
+ firmwareinfo);
if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
- index, &size, &frev, &crev, &data_offset)) {
+ index, &size, &frev, &crev,
+ &data_offset)) {
/* support firmware_info 3.4 + */
if ((frev == 3 && crev >=4) || (frev > 3)) {
firmware_info = (union firmware_info *)
(mode_info->atom_context->bios + data_offset);
- *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+			/* Ideally, ras_rom_i2c_slave_addr would be a
+			 * 19-bit EEPROM address, which the driver
+			 * could then use as is; see top of
+			 * amdgpu_eeprom.c.
+			 *
+			 * Once that is the case, 0 becomes a valid
+			 * RAS EEPROM address, and the non-zero check
+			 * below should be dropped, leaving only the
+			 * pointer check.
+			 *
+			 * This works for now because
+			 * ras_rom_i2c_slave_addr carries the EEPROM
+			 * device type qualifier 1010b in its top 4
+			 * bits, so a valid address is never 0.
+			 */
+ if (firmware_info->v34.ras_rom_i2c_slave_addr) {
+ if (i2c_address)
+ *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+ return true;
+ }
}
}
- if (*i2c_address != 0)
- return true;
-
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8e5a7ac8c36f..7a7316731911 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -522,6 +522,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
break;
case CHIP_RENOIR:
case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
domain |= AMDGPU_GEM_DOMAIN_GTT;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b6640291f980..f18240f87387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1181,7 +1181,12 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1197,6 +1202,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
/* DIMGREY_CAVEFISH */
@@ -1204,6 +1214,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
/* Aldebaran */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index d94c5419ec25..5a6857c44bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -59,6 +59,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
struct drm_file *file = f->private_data;
struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
+ struct amdgpu_bo *root;
int ret;
ret = amdgpu_file_to_fpriv(f, &fpriv);
@@ -69,13 +70,19 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
dev = PCI_SLOT(adev->pdev->devfn);
fn = PCI_FUNC(adev->pdev->devfn);
- ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
+ root = amdgpu_bo_ref(fpriv->vm.root.bo);
+ if (!root)
+ return;
+
+ ret = amdgpu_bo_reserve(root, false);
if (ret) {
DRM_ERROR("Fail to reserve bo\n");
return;
}
amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
- amdgpu_bo_unreserve(fpriv->vm.root.bo);
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+
seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
dev, fn, fpriv->vm.pasid);
seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 14499f0de32d..8d682befe0d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -552,6 +552,9 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler)
+ drm_sched_stop(&ring->sched, NULL);
+
/* You can't wait for HW to signal if it's gone */
if (!drm_dev_is_unplugged(&adev->ddev))
r = amdgpu_fence_wait_empty(ring);
@@ -611,6 +614,11 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler) {
+ drm_sched_resubmit_jobs(&ring->sched);
+ drm_sched_start(&ring->sched, true);
+ }
+
/* enable the interrupt */
if (ring->fence_drv.irq_src)
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index cb07cc3b06ed..d6aa032890ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -341,21 +341,18 @@ retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- }
+ if (r && r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
- goto retry;
- }
- DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
- size, initial_domain, args->in.alignment, r);
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
}
- return r;
+ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+ size, initial_domain, args->in.alignment, r);
}
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 543000304a1c..675a72ef305d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -118,7 +118,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Dummy, allocate the node but no space for it yet.
*/
@@ -182,7 +182,7 @@ err_out:
* amdgpu_gtt_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated GTT again.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 23efdc672502..9b41cb8c3de5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -469,10 +469,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
*/
if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
- DRM_WARN("failed to load ucode (%s) ",
- amdgpu_ucode_name(ucode->ucode_id));
- DRM_WARN("psp gfx command (%s) failed and response status is (0x%X)\n",
- psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+ DRM_WARN("failed to load ucode %s(0x%X) ",
+ amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
+ DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
if (!timeout) {
ret = -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 9dc3b2d88176..dc44c946a244 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -114,27 +114,24 @@ static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
struct amdgpu_ras_eeprom_control *control)
{
- uint8_t ras_rom_i2c_slave_addr;
+ u8 i2c_addr;
if (!control)
return false;
- control->i2c_address = 0;
-
- if (amdgpu_atomfirmware_ras_rom_addr(adev, &ras_rom_i2c_slave_addr))
- {
- switch (ras_rom_i2c_slave_addr) {
- case 0xA0:
- control->i2c_address = 0;
- return true;
- case 0xA8:
- control->i2c_address = 0x40000;
- return true;
- default:
- dev_warn(adev->dev, "RAS EEPROM I2C slave address %02x not supported",
- ras_rom_i2c_slave_addr);
- return false;
- }
+ if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ /* The address given by VBIOS is an 8-bit, wire-format
+ * address, i.e. the most significant byte.
+ *
+		 * Normalize it to a 19-bit EEPROM address: strip the
+		 * device type identifier to get a 7-bit address, then
+		 * widen that into the 19-bit EEPROM form. See top of
+		 * amdgpu_eeprom.c.
+ */
+ i2c_addr = (i2c_addr & 0x0F) >> 1;
+ control->i2c_address = ((u32) i2c_addr) << 16;
+
+ return true;
}
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 2fd77c36a1ff..7b2b0980ec41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -361,7 +361,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Allocate VRAM for the given BO.
*/
@@ -487,7 +487,7 @@ error_sub:
* amdgpu_vram_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated VRAM again.
*/
@@ -522,7 +522,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
* amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
*
* @adev: amdgpu device pointer
- * @mem: TTM memory object
+ * @res: TTM memory object
* @offset: byte offset from the base of VRAM BO
* @length: number of bytes to export in sg_table
* @dev: the other device
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index ff2307d7ee0f..23b066bcffb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -258,6 +258,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
+ xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 50572635d0f8..bd3b23171579 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -37,6 +37,7 @@ enum idh_request {
IDH_REQ_GPU_RESET_ACCESS,
IDH_LOG_VF_ERROR = 200,
+ IDH_READY_TO_RESET = 201,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index ba1d3ab869c1..f50045cebd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -85,11 +85,14 @@
#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2
-#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x3878
+#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x00fe
#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX 2
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
+#define mmBIF_INTR_CNTL_ALDE 0x0101
+#define mmBIF_INTR_CNTL_ALDE_BASE_IDX 2
+
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
@@ -440,14 +443,23 @@ static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
*/
uint32_t bif_intr_cntl;
- bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+ else
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
		 * vector 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
}
return 0;
@@ -476,14 +488,22 @@ static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *ade
*/
uint32_t bif_intr_cntl;
- bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+ else
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
		 * vector 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 42a35d9520f9..fe9a7cc8d9eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -904,14 +904,7 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_TOPAZ:
- /* Disable BACO support for the specific polaris12 SKU temporarily */
- if ((adev->pdev->device == 0x699F) &&
- (adev->pdev->revision == 0xC7) &&
- (adev->pdev->subsystem_vendor == 0x1028) &&
- (adev->pdev->subsystem_device == 0x0039))
- return false;
- else
- return amdgpu_dpm_is_baco_supported(adev);
+ return amdgpu_dpm_is_baco_supported(adev);
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 491373fcdb38..9fc8021bb0ab 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2484,7 +2484,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
- return -EFAULT;
+ r = -EFAULT;
+ goto out;
}
svms = &p->svms;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 816723691d51..9b1fc54555ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1200,7 +1200,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (adev->apu_flags) {
+ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config;
mmhub_read_system_context(adev, &pa_config);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cd025c12f17b..330edd666b7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1561,7 +1561,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct dc_link_settings *link_setting)
{
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
dp_decide_training_settings(
link,
@@ -1707,7 +1707,7 @@ enum link_training_result dc_link_dp_perform_link_training(
bool skip_video_pattern)
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
enum dp_link_encoding encoding =
dp_get_link_encoding_format(link_settings);
@@ -1923,7 +1923,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link_settings *link_settings,
struct dc_link_training_overrides *lt_overrides)
{
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index dc7823d23ba8..dd38796ba30a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -510,8 +510,12 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
vpg = dcn303_vpg_create(ctx, vpg_inst);
afmt = dcn303_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id],
&se_shift, &se_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 7db268da6976..3b3721386571 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -109,7 +109,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;
if (!dcn31_query_backlight_info(panel_cntl, &cmd))
- return 0;
+ return false;
return cmd.panel_cntl.data.is_backlight_on;
}
@@ -119,7 +119,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;
if (!dcn31_query_backlight_info(panel_cntl, &cmd))
- return 0;
+ return false;
return cmd.panel_cntl.data.is_powered_on;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index fbed5304692d..63bbdf8b8678 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2641,7 +2641,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
if (mode_lib->vba.DRAMClockChangeWatermark >
- dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 2d55627b05b1..249cb0aeb5ae 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2005,10 +2005,10 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 715b4225f5ee..8156729c370b 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -1335,6 +1335,30 @@ enum smu_cmn2asic_mapping_type {
#define WORKLOAD_MAP(profile, workload) \
[profile] = {1, (workload)}
+/**
+ * smu_memcpy_trailing - Copy the end of one structure into the middle of another
+ *
+ * @dst: Pointer to destination struct
+ * @first_dst_member: The member name in @dst where the overwrite begins
+ * @last_dst_member: The last member in @dst that the overwrite covers;
+ * the copy ends just past it
+ * @src: Pointer to the source struct
+ * @first_src_member: The member name in @src where the copy begins
+ *
+ */
+#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member, \
+ src, first_src_member) \
+({ \
+ size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \
+ size_t __src_size = sizeof(*(src)) - __src_offset; \
+ size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member); \
+ size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
+ __dst_offset; \
+ BUILD_BUG_ON(__src_size != __dst_size); \
+ __builtin_memcpy((u8 *)(dst) + __dst_offset, \
+ (u8 *)(src) + __src_offset, \
+ __dst_size); \
+})
+
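+
A hedged usage sketch of the macro with made-up struct layouts (the real call sites are in the arcturus, navi10, and aldebaran hunks further down): the copy runs from @first_src_member to the end of *src, lands in the @first_dst_member..@last_dst_member span of *dst, and the BUILD_BUG_ON rejects any size mismatch at compile time.

/* Illustrative structs only, not real SMU tables. */
struct demo_src {
	u32 header;
	u32 a;
	u32 b[4];
};

struct demo_dst {
	u64 other;
	u32 a;
	u32 b[4];
	u32 tail;
};

static void demo_copy(struct demo_dst *dst, const struct demo_src *src)
{
	/* Copies the 20 bytes from src->a to the end of *src into
	 * dst->a .. dst->b; a size mismatch would fail the build.
	 */
	smu_memcpy_trailing(dst, a, b, src, a);
}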
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 465ff8d2a01a..e7803ce8f67a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -27,6 +27,9 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
+#if IS_ENABLED(CONFIG_X86_64)
+#include <asm/intel-family.h>
+#endif
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
@@ -1733,6 +1736,17 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result;
}
+static bool intel_core_rkl_chk(void)
+{
+#if IS_ENABLED(CONFIG_X86_64)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
+#else
+ return false;
+#endif
+}
+
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1758,7 +1772,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
- data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
+ data->pcie_dpm_key_disabled =
+ intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
/* need to set voltage control types before EVV patching */
data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 273df66cac14..e343cc218990 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -483,10 +483,8 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 6))
- memcpy(&smc_pptable->MaxVoltageStepGfx,
- &smc_dpm_table->maxvoltagestepgfx,
- sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_6, maxvoltagestepgfx));
-
+ smu_memcpy_trailing(smc_pptable, MaxVoltageStepGfx, BoardReserved,
+ smc_dpm_table, maxvoltagestepgfx);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index f96681700c41..a5fc5d7cb6c7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -431,16 +431,16 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
switch (smc_dpm_table->table_header.content_revision) {
case 5: /* nv10 and nv14 */
- memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
- sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+ smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
+ smc_dpm_table, I2cControllers);
break;
case 7: /* nv12 */
ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
(uint8_t **)&smc_dpm_table_v4_7);
if (ret)
return ret;
- memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
- sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
+ smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
+ smc_dpm_table_v4_7, I2cControllers);
break;
default:
dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 6eb50b05a33c..3a3421452e57 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1869,7 +1869,7 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index b39138041141..5aa175e12a78 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -426,7 +426,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index ec8c30daf31c..ab652028e003 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -409,9 +409,8 @@ static int aldebaran_append_powerplay_table(struct smu_context *smu)
if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 10))
- memcpy(&smc_pptable->GfxMaxCurrent,
- &smc_dpm_table->GfxMaxCurrent,
- sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_10, GfxMaxCurrent));
+ smu_memcpy_trailing(smc_pptable, GfxMaxCurrent, reserved,
+ smc_dpm_table, GfxMaxCurrent);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 0f17c2522c85..627ba2eec7fd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -731,7 +731,7 @@ static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index 51dbe0e3294e..d2969f68dd64 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -6,7 +6,7 @@
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H
-#include <stddef.h>
+#include <linux/stddef.h>
struct intel_engine_cs;
struct intel_gt;
diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c
index 7c903cf19c0d..e9ae22b4f813 100644
--- a/drivers/gpu/drm/mgag200/mgag200_pll.c
+++ b/drivers/gpu/drm/mgag200/mgag200_pll.c
@@ -124,6 +124,7 @@ static int mgag200_pixpll_compute_g200se_00(struct mgag200_pll *pixpll, long clo
unsigned int computed;
m = n = p = s = 0;
+ delta = 0xffffffff;
permitteddelta = clock * 5 / 1000;
for (testp = 8; testp > 0; testp /= 2) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 0da5b3100ab1..dfe5f1d29763 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -58,25 +58,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
- u64 iova, size_t size)
+ u64 iova, u64 size)
{
u8 region_width;
u64 region = iova & PAGE_MASK;
- /*
- * fls returns:
- * 1 .. 32
- *
- * 10 + fls(num_pages)
- * results in the range (11 .. 42)
- */
-
- size = round_up(size, PAGE_SIZE);
- region_width = 10 + fls(size >> PAGE_SHIFT);
- if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
- /* not pow2, so must go up to the next pow2 */
- region_width += 1;
- }
+	/* The size is encoded as ceil(log2(size)) - 1, which may be
+	 * calculated with fls64(). The size must be clamped to
+	 * hardware bounds.
+ */
+ size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
+ region_width = fls64(size - 1) - 1;
region |= region_width;
/* Lock the region that needs to be updated */
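Checked with concrete sizes: 32 KiB (the AS_LOCK_REGION_MIN_SIZE floor added in panfrost_regs.h below) gives fls64(0x8000 - 1) - 1 = 14, and a 409600-byte lock rounds up to the next power of two, fls64(409599) - 1 = 18, i.e. a 512 KiB region. A standalone version of the encoding, using __builtin_clzll in place of the kernel's fls64():

#include <stdint.h>
#include <stdio.h>

/* ceil(log2(size)) - 1, as lock_region() now computes. */
static unsigned int region_width(uint64_t size)
{
	/* 64 - clz(size - 1) equals fls64(size - 1) for size >= 2 */
	return 64 - __builtin_clzll(size - 1) - 1;
}

int main(void)
{
	printf("%u\n", region_width(1ULL << 15)); /* 14 */
	printf("%u\n", region_width(409600));     /* 18: 512 KiB region */
	return 0;
}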
@@ -87,7 +78,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
if (as_nr < 0)
return 0;
@@ -104,7 +95,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
int ret;
@@ -121,7 +112,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
@@ -137,7 +128,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
@@ -251,7 +242,7 @@ static size_t get_pgsize(u64 addr, size_t size)
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size)
+ u64 iova, u64 size)
{
if (mmu->as < 0)
return;
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index 1940ff86e49a..6c5a11ef1ee8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -316,6 +316,8 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
+
#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define gpu_read(dev, reg) readl(dev->iomem + reg)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 8ab3247dbc4a..13c6b857158f 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -1123,7 +1123,7 @@ static int cdn_dp_suspend(struct device *dev)
return ret;
}
-static int cdn_dp_resume(struct device *dev)
+static __maybe_unused int cdn_dp_resume(struct device *dev)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ea4add2b9717..bb9e02c31946 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1160,9 +1160,9 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
}
if (bo->deleted) {
- ttm_bo_cleanup_refs(bo, false, false, locked);
+ ret = ttm_bo_cleanup_refs(bo, false, false, locked);
ttm_bo_put(bo);
- return 0;
+ return ret == -EBUSY ? -ENOSPC : ret;
}
ttm_bo_del_from_lru(bo);
@@ -1216,7 +1216,7 @@ out:
if (locked)
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
- return ret;
+ return ret == -EBUSY ? -ENOSPC : ret;
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 763fa6f4e07d..1c5ffe2935af 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -143,7 +143,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_resource *src_mem = bo->resource;
struct ttm_resource_manager *src_man =
ttm_manager_type(bdev, src_mem->mem_type);
- struct ttm_resource src_copy = *src_mem;
union {
struct ttm_kmap_iter_tt tt;
struct ttm_kmap_iter_linear_io io;
@@ -173,11 +172,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
}
ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
- src_copy = *src_mem;
- ttm_bo_move_sync_cleanup(bo, dst_mem);
if (!src_iter->ops->maps_tt)
- ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
+ ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+
out_src_iter:
if (!dst_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index cb38b1a17b09..82cbb29a05aa 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -383,7 +383,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+ for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+ num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
bool apply_caching = false;
struct ttm_pool_type *pt;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 24031a8acd2d..d5cd8b5dc0bf 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -32,7 +32,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/sched.h>
-#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b7dc32a0c9bb..4a1115043114 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1462,7 +1462,7 @@ static const struct hdmi_codec_ops vc4_hdmi_codec_ops = {
.audio_startup = vc4_hdmi_audio_startup,
};
-struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
+static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
.ops = &vc4_hdmi_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 2aee356840a2..314015d9e912 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
mutex_unlock(&ring_info->ring_buffer_mutex);
kfree(ring_info->pkt_buffer);
+ ring_info->pkt_buffer = NULL;
ring_info->pkt_buffer_size = 0;
}
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 18da5a25e89a..868243dba1ee 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -17,6 +17,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/units.h>
/* PVT Common register */
#define PVT_IP_CONFIG 0x04
@@ -37,7 +38,6 @@
#define CLK_SYNTH_EN BIT(24)
#define CLK_SYS_CYCLES_MAX 514
#define CLK_SYS_CYCLES_MIN 2
-#define HZ_PER_MHZ 1000000L
#define SDIF_DISABLE 0x04
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 043f199e7bc6..9b279937a24e 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -6,12 +6,11 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
+#include <linux/units.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#define HZ_PER_MHZ 1000000L
-
static struct {
u32 usage_id;
int unit; /* 0 for default others from HID sensor spec */
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index 7b32dfaee9b3..3ba2378df3dd 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -24,8 +24,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm.h>
-
-#define HZ_PER_KHZ 1000
+#include <linux/units.h>
#define AS73211_DRV_NAME "as73211"
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 3048862c961c..408dfbcc47b5 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1309,7 +1309,7 @@ out:
static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
struct bnxt_re_pd *pd)
{
- struct bnxt_re_sqp_entries *sqp_tbl = NULL;
+ struct bnxt_re_sqp_entries *sqp_tbl;
struct bnxt_re_dev *rdev;
struct bnxt_re_qp *sqp;
struct bnxt_re_ah *sah;
@@ -1317,7 +1317,7 @@ static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
rdev = qp->rdev;
/* Create a shadow QP to handle the QP1 traffic */
- sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
+ sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
GFP_KERNEL);
if (!sqp_tbl)
return -ENOMEM;
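
Note on the ib_verbs.c hunk above: kzalloc(sizeof(*sqp_tbl) * N, ...) multiplies in the caller and can overflow silently; kcalloc(N, size, ...) performs the same zeroed allocation but returns NULL if n * size would overflow. The pattern in isolation:

	/* overflow-checked, zeroed array allocation */
	tbl = kcalloc(nentries, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
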
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index d9b5bbb2d011..8302469582c6 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -488,7 +488,7 @@ struct hfi1_ctxt_hist {
atomic_t data[255];
};
-struct hfi1_ctxt_hist hist = {
+static struct hfi1_ctxt_hist hist = {
.count = ATOMIC_INIT(0)
};
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a520ac8ab68c..3be36ebbf67a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -995,7 +995,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
const size_t xlt_chunk_align =
- MLX5_UMR_MTT_ALIGNMENT / sizeof(ent_size);
+ MLX5_UMR_MTT_ALIGNMENT / ent_size;
size_t size;
void *res = NULL;
@@ -1024,7 +1024,7 @@ static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
if (size > MLX5_SPARE_UMR_CHUNK) {
size = MLX5_SPARE_UMR_CHUNK;
- *nents = get_order(size) / ent_size;
+ *nents = size / ent_size;
res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
get_order(size));
if (res)
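
Note on the mlx5 mr.c hunks above: both fix size arithmetic. sizeof(ent_size) is the width of the size_t variable itself (8 on 64-bit), not the entry size stored in it, and get_order() returns a page order, not a byte count. A worked example of the first bug, with made-up numbers:

	size_t ent_size = 16;			/* bytes per XLT entry */

	/* buggy: 4096 / sizeof(ent_size) == 4096 / 8  == 512  (wrong)   */
	/* fixed: 4096 / ent_size         == 4096 / 16 == 256  (correct) */
	size_t align = 4096 / ent_size;
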
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index d57e49de6650..452e2355d24e 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -403,9 +403,11 @@ static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
}
#define QIB_DIAGC_ATTR(N) \
+ static_assert(&((struct qib_ibport *)0)->rvp.n_##N != (u64 *)NULL); \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
- .counter = &((struct qib_ibport *)0)->rvp.n_##N - (u64 *)0, \
+ .counter = \
+ offsetof(struct qib_ibport, rvp.n_##N) / sizeof(u64) \
}
QIB_DIAGC_ATTR(rc_resends);
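
Note on the qib_sysfs.c hunk above: the old macro derived a member index via pointer arithmetic on a NULL base, which is undefined behaviour; offsetof() yields the same byte offset without forming invalid pointers, and the added static_assert keeps the "member is a u64" assumption checked at compile time. The idiom in miniature:

	#include <stddef.h>	/* offsetof() */

	struct counters { unsigned long long a, b, c; };

	/* index of 'b' when the struct is viewed as an array of u64 */
	enum { B_IDX = offsetof(struct counters, b) / sizeof(unsigned long long) };
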
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index f798922a4598..882c3c8ba399 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -28,10 +28,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-static bool use_ktime = true;
-module_param(use_ktime, bool, 0400);
-MODULE_PARM_DESC(use_ktime, "Use ktime for measuring I/O speed");
-
/*
* Option parsing.
*/
@@ -110,7 +106,6 @@ struct analog_port {
char cooked;
int bads;
int reads;
- int speed;
int loop;
int fuzz;
int axes[4];
@@ -120,66 +115,6 @@ struct analog_port {
};
/*
- * Time macros.
- */
-
-#ifdef __i386__
-
-#include <linux/i8253.h>
-
-#define GET_TIME(x) do { if (boot_cpu_has(X86_FEATURE_TSC)) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
-#define DELTA(x,y) (boot_cpu_has(X86_FEATURE_TSC) ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
-#define TIME_NAME (boot_cpu_has(X86_FEATURE_TSC)?"TSC":"PIT")
-static unsigned int get_time_pit(void)
-{
- unsigned long flags;
- unsigned int count;
-
- raw_spin_lock_irqsave(&i8253_lock, flags);
- outb_p(0x00, 0x43);
- count = inb_p(0x40);
- count |= inb_p(0x40) << 8;
- raw_spin_unlock_irqrestore(&i8253_lock, flags);
-
- return count;
-}
-#elif defined(__x86_64__)
-#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
-#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "TSC"
-#elif defined(__alpha__) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_PPC) || defined(CONFIG_RISCV)
-#define GET_TIME(x) do { x = get_cycles(); } while (0)
-#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "get_cycles"
-#else
-#define FAKE_TIME
-static unsigned long analog_faketime = 0;
-#define GET_TIME(x) do { x = analog_faketime++; } while(0)
-#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "Unreliable"
-#warning Precise timer not defined for this architecture.
-#endif
-
-static inline u64 get_time(void)
-{
- if (use_ktime) {
- return ktime_get_ns();
- } else {
- unsigned int x;
- GET_TIME(x);
- return x;
- }
-}
-
-static inline unsigned int delta(u64 x, u64 y)
-{
- if (use_ktime)
- return y - x;
- else
- return DELTA((unsigned int)x, (unsigned int)y);
-}
-
-/*
* analog_decode() decodes analog joystick data and reports input events.
*/
@@ -234,18 +169,18 @@ static void analog_decode(struct analog *analog, int *axes, int *initial, int bu
static int analog_cooked_read(struct analog_port *port)
{
struct gameport *gameport = port->gameport;
- u64 time[4], start, loop, now;
+ ktime_t time[4], start, loop, now;
unsigned int loopout, timeout;
unsigned char data[4], this, last;
unsigned long flags;
int i, j;
loopout = (ANALOG_LOOP_TIME * port->loop) / 1000;
- timeout = ANALOG_MAX_TIME * port->speed;
+ timeout = ANALOG_MAX_TIME * NSEC_PER_MSEC;
local_irq_save(flags);
gameport_trigger(gameport);
- now = get_time();
+ now = ktime_get();
local_irq_restore(flags);
start = now;
@@ -258,16 +193,16 @@ static int analog_cooked_read(struct analog_port *port)
local_irq_disable();
this = gameport_read(gameport) & port->mask;
- now = get_time();
+ now = ktime_get();
local_irq_restore(flags);
- if ((last ^ this) && (delta(loop, now) < loopout)) {
+ if ((last ^ this) && (ktime_sub(now, loop) < loopout)) {
data[i] = last ^ this;
time[i] = now;
i++;
}
- } while (this && (i < 4) && (delta(start, now) < timeout));
+ } while (this && (i < 4) && (ktime_sub(now, start) < timeout));
this <<= 4;
@@ -275,7 +210,7 @@ static int analog_cooked_read(struct analog_port *port)
this |= data[i];
for (j = 0; j < 4; j++)
if (data[i] & (1 << j))
- port->axes[j] = (delta(start, time[i]) << ANALOG_FUZZ_BITS) / port->loop;
+ port->axes[j] = ((u32)ktime_sub(time[i], start) << ANALOG_FUZZ_BITS) / port->loop;
}
return -(this != port->mask);
@@ -375,38 +310,22 @@ static void analog_calibrate_timer(struct analog_port *port)
{
struct gameport *gameport = port->gameport;
unsigned int i, t, tx;
- u64 t1, t2, t3;
+ ktime_t t1, t2, t3;
unsigned long flags;
- if (use_ktime) {
- port->speed = 1000000;
- } else {
- local_irq_save(flags);
- t1 = get_time();
-#ifdef FAKE_TIME
- analog_faketime += 830;
-#endif
- mdelay(1);
- t2 = get_time();
- t3 = get_time();
- local_irq_restore(flags);
-
- port->speed = delta(t1, t2) - delta(t2, t3);
- }
-
tx = ~0;
for (i = 0; i < 50; i++) {
local_irq_save(flags);
- t1 = get_time();
+ t1 = ktime_get();
for (t = 0; t < 50; t++) {
gameport_read(gameport);
- t2 = get_time();
+ t2 = ktime_get();
}
- t3 = get_time();
+ t3 = ktime_get();
local_irq_restore(flags);
udelay(i);
- t = delta(t1, t2) - delta(t2, t3);
+ t = ktime_sub(t2, t1) - ktime_sub(t3, t2);
if (t < tx) tx = t;
}
@@ -611,7 +530,7 @@ static int analog_init_port(struct gameport *gameport, struct gameport_driver *d
t = gameport_read(gameport);
msleep(ANALOG_MAX_TIME);
port->mask = (gameport_read(gameport) ^ t) & t & 0xf;
- port->fuzz = (port->speed * ANALOG_FUZZ_MAGIC) / port->loop / 1000 + ANALOG_FUZZ_BITS;
+ port->fuzz = (NSEC_PER_MSEC * ANALOG_FUZZ_MAGIC) / port->loop / 1000 + ANALOG_FUZZ_BITS;
for (i = 0; i < ANALOG_INIT_RETRIES; i++) {
if (!analog_cooked_read(port))
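
Note on the analog.c diff above: the per-architecture TSC/PIT/get_cycles() macros and the use_ktime parameter are removed; ktime_get() is used unconditionally, so the calibrated port->speed factor collapses to NSEC_PER_MSEC. The core timing pattern it converges on:

	#include <linux/ktime.h>

	ktime_t start, now;
	u64 elapsed_ns;

	start = ktime_get();			/* monotonic, ns resolution */
	/* ... poll the gameport ... */
	now = ktime_get();
	elapsed_ns = ktime_sub(now, start);	/* ktime_t is an s64 ns count */
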
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 40a070a2e7f5..e75650e98c9e 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -210,7 +210,7 @@ config KEYBOARD_LKKBD
select SERIO
help
Say Y here if you want to use a LK201 or LK401 style serial
- keyboard. This keyboard is also useable on PCs if you attach
+ keyboard. This keyboard is also usable on PCs if you attach
it with the inputattach program. The connector pinout is
described within lkkbd.c.
diff --git a/drivers/input/keyboard/adc-keys.c b/drivers/input/keyboard/adc-keys.c
index 6d5be48d1b3d..bf72ab8df817 100644
--- a/drivers/input/keyboard/adc-keys.c
+++ b/drivers/input/keyboard/adc-keys.c
@@ -193,7 +193,7 @@ static const struct of_device_id adc_keys_of_match[] = {
MODULE_DEVICE_TABLE(of, adc_keys_of_match);
#endif
-static struct platform_driver __refdata adc_keys_driver = {
+static struct platform_driver adc_keys_driver = {
.driver = {
.name = "adc_keys",
.of_match_table = of_match_ptr(adc_keys_of_match),
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 90a59b973d00..1592da4de336 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/platform_data/adp5588.h>
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 654e0476406b..bdd264459a97 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -18,7 +18,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/input/adp5589.h>
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index c8194333d612..e0e931e796fa 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -157,7 +157,7 @@ static int ep93xx_keypad_open(struct input_dev *pdev)
if (!keypad->enabled) {
ep93xx_keypad_config(keypad);
- clk_enable(keypad->clk);
+ clk_prepare_enable(keypad->clk);
keypad->enabled = true;
}
@@ -169,7 +169,7 @@ static void ep93xx_keypad_close(struct input_dev *pdev)
struct ep93xx_keypad *keypad = input_get_drvdata(pdev);
if (keypad->enabled) {
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
keypad->enabled = false;
}
}
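
Note on the ep93xx_keypad.c hunks above: clk_enable()/clk_disable() may only act on an already-prepared clock, and clk_prepare() can sleep while clk_enable() cannot. In sleepable open/close paths the combined helpers keep the prepare and enable counts balanced in one call; sketched here with the return value checked:

	/* open path: prepare (may sleep) + enable in one step */
	error = clk_prepare_enable(keypad->clk);
	if (error)
		return error;

	/* close path: the mirror-image helper */
	clk_disable_unprepare(keypad->clk);
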
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 498cde376981..dd5227cf8696 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -309,18 +309,6 @@ config INPUT_GPIO_VIBRA
To compile this driver as a module, choose M here: the module will be
called gpio-vibra.
-config INPUT_IXP4XX_BEEPER
- tristate "IXP4XX Beeper support"
- depends on ARCH_IXP4XX
- help
- If you say yes here, you can connect a beeper to the
- ixp4xx gpio pins. This is used by the LinkSys NSLU2.
-
- If unsure, say Y.
-
- To compile this driver as a module, choose M here: the
- module will be called ixp4xx-beeper.
-
config INPUT_COBALT_BTNS
tristate "Cobalt button interface"
depends on MIPS_COBALT
@@ -811,16 +799,6 @@ config INPUT_XEN_KBDDEV_FRONTEND
To compile this driver as a module, choose M here: the
module will be called xen-kbdfront.
-config INPUT_SIRFSOC_ONKEY
- tristate "CSR SiRFSoC power on/off/suspend key support"
- depends on ARCH_SIRF && OF
- default y
- help
- Say Y here if you want to support for the SiRFSoC power on/off/suspend key
- in Linux, after you press the onkey, system will suspend.
-
- If unsure, say N.
-
config INPUT_IDEAPAD_SLIDEBAR
tristate "IdeaPad Laptop Slidebar"
depends on INPUT
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f593beed7e05..b92c53a6b5ae 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o
obj-$(CONFIG_INPUT_IQS626A) += iqs626a.o
-obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
@@ -74,7 +73,6 @@ obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o
obj-$(CONFIG_INPUT_SC27XX_VIBRA) += sc27xx-vibra.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
-obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o
obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_STPMIC1_ONKEY) += stpmic1_onkey.o
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
deleted file mode 100644
index 05018d0c97c7..000000000000
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic IXP4xx beeper driver
- *
- * Copyright (C) 2005 Tower Technologies
- *
- * based on nslu2-io.c
- * Copyright (C) 2004 Karen Spearel
- *
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- * Maintainers: http://www.nslu2-linux.org/
- */
-
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <mach/hardware.h>
-
-MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
-MODULE_DESCRIPTION("ixp4xx beeper driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ixp4xx-beeper");
-
-static DEFINE_SPINLOCK(beep_lock);
-
-static int ixp4xx_timer2_irq;
-
-static void ixp4xx_spkr_control(unsigned int pin, unsigned int count)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&beep_lock, flags);
-
- if (count) {
- gpio_direction_output(pin, 0);
- *IXP4XX_OSRT2 = (count & ~IXP4XX_OST_RELOAD_MASK) | IXP4XX_OST_ENABLE;
- } else {
- gpio_direction_output(pin, 1);
- gpio_direction_input(pin);
- *IXP4XX_OSRT2 = 0;
- }
-
- spin_unlock_irqrestore(&beep_lock, flags);
-}
-
-static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
-{
- unsigned int pin = (unsigned int) input_get_drvdata(dev);
- unsigned int count = 0;
-
- if (type != EV_SND)
- return -1;
-
- switch (code) {
- case SND_BELL:
- if (value)
- value = 1000;
- case SND_TONE:
- break;
- default:
- return -1;
- }
-
- if (value > 20 && value < 32767)
- count = (ixp4xx_timer_freq / (value * 4)) - 1;
-
- ixp4xx_spkr_control(pin, count);
-
- return 0;
-}
-
-static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
-{
- unsigned int pin = (unsigned int) dev_id;
-
- /* clear interrupt */
- *IXP4XX_OSST = IXP4XX_OSST_TIMER_2_PEND;
-
- /* flip the beeper output */
- gpio_set_value(pin, !gpio_get_value(pin));
-
- return IRQ_HANDLED;
-}
-
-static int ixp4xx_spkr_probe(struct platform_device *dev)
-{
- struct input_dev *input_dev;
- int irq;
- int err;
-
- input_dev = input_allocate_device();
- if (!input_dev)
- return -ENOMEM;
-
- input_set_drvdata(input_dev, (void *) dev->id);
-
- input_dev->name = "ixp4xx beeper";
- input_dev->phys = "ixp4xx/gpio";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x001f;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &dev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_SND);
- input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
- input_dev->event = ixp4xx_spkr_event;
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- err = irq;
- goto err_free_device;
- }
-
- err = gpio_request(dev->id, "ixp4-beeper");
- if (err)
- goto err_free_device;
-
- err = request_irq(irq, &ixp4xx_spkr_interrupt,
- IRQF_NO_SUSPEND, "ixp4xx-beeper",
- (void *) dev->id);
- if (err)
- goto err_free_gpio;
- ixp4xx_timer2_irq = irq;
-
- err = input_register_device(input_dev);
- if (err)
- goto err_free_irq;
-
- platform_set_drvdata(dev, input_dev);
-
- return 0;
-
- err_free_irq:
- free_irq(irq, (void *)dev->id);
- err_free_gpio:
- gpio_free(dev->id);
- err_free_device:
- input_free_device(input_dev);
-
- return err;
-}
-
-static int ixp4xx_spkr_remove(struct platform_device *dev)
-{
- struct input_dev *input_dev = platform_get_drvdata(dev);
- unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
-
- input_unregister_device(input_dev);
-
- /* turn the speaker off */
- disable_irq(ixp4xx_timer2_irq);
- ixp4xx_spkr_control(pin, 0);
-
- free_irq(ixp4xx_timer2_irq, (void *)dev->id);
- gpio_free(dev->id);
-
- return 0;
-}
-
-static void ixp4xx_spkr_shutdown(struct platform_device *dev)
-{
- struct input_dev *input_dev = platform_get_drvdata(dev);
- unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
-
- /* turn off the speaker */
- disable_irq(ixp4xx_timer2_irq);
- ixp4xx_spkr_control(pin, 0);
-}
-
-static struct platform_driver ixp4xx_spkr_platform_driver = {
- .driver = {
- .name = "ixp4xx-beeper",
- },
- .probe = ixp4xx_spkr_probe,
- .remove = ixp4xx_spkr_remove,
- .shutdown = ixp4xx_spkr_shutdown,
-};
-module_platform_driver(ixp4xx_spkr_platform_driver);
-
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 10e3fc0eac6e..33609603245d 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -284,7 +284,7 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
}
if (pwrkey->data->supports_ps_hold_poff_config) {
- pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify,
+ pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify;
error = register_reboot_notifier(&pwrkey->reboot_notifier);
if (error) {
dev_err(&pdev->dev, "failed to register reboot notifier: %d\n",
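
Note on the pm8941-pwrkey.c hunk above: the fix swaps a trailing comma for a semicolon. With the comma operator, the assignment and the register_reboot_notifier() call formed one expression statement; the behaviour happened to be the same here (the comma sequences left to right), but the structure was hidden and fragile, e.g. under a braceless if. Illustration:

	a = f(), error = g();	/* one statement; easy to misread or misguard */
	a = f(); error = g();	/* two ordinary statements */
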
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
deleted file mode 100644
index 7982bf8fb839..000000000000
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Power key driver for SiRF PrimaII
- *
- * Copyright (c) 2013 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/rtc/sirfsoc_rtciobrg.h>
-#include <linux/of.h>
-#include <linux/workqueue.h>
-
-struct sirfsoc_pwrc_drvdata {
- u32 pwrc_base;
- struct input_dev *input;
- struct delayed_work work;
-};
-
-#define PWRC_ON_KEY_BIT (1 << 0)
-
-#define PWRC_INT_STATUS 0xc
-#define PWRC_INT_MASK 0x10
-#define PWRC_PIN_STATUS 0x14
-#define PWRC_KEY_DETECT_UP_TIME 20 /* ms*/
-
-static int sirfsoc_pwrc_is_on_key_down(struct sirfsoc_pwrc_drvdata *pwrcdrv)
-{
- u32 state = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base +
- PWRC_PIN_STATUS);
- return !(state & PWRC_ON_KEY_BIT); /* ON_KEY is active low */
-}
-
-static void sirfsoc_pwrc_report_event(struct work_struct *work)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv =
- container_of(work, struct sirfsoc_pwrc_drvdata, work.work);
-
- if (sirfsoc_pwrc_is_on_key_down(pwrcdrv)) {
- schedule_delayed_work(&pwrcdrv->work,
- msecs_to_jiffies(PWRC_KEY_DETECT_UP_TIME));
- } else {
- input_event(pwrcdrv->input, EV_KEY, KEY_POWER, 0);
- input_sync(pwrcdrv->input);
- }
-}
-
-static irqreturn_t sirfsoc_pwrc_isr(int irq, void *dev_id)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_id;
- u32 int_status;
-
- int_status = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base +
- PWRC_INT_STATUS);
- sirfsoc_rtc_iobrg_writel(int_status & ~PWRC_ON_KEY_BIT,
- pwrcdrv->pwrc_base + PWRC_INT_STATUS);
-
- input_event(pwrcdrv->input, EV_KEY, KEY_POWER, 1);
- input_sync(pwrcdrv->input);
- schedule_delayed_work(&pwrcdrv->work,
- msecs_to_jiffies(PWRC_KEY_DETECT_UP_TIME));
-
- return IRQ_HANDLED;
-}
-
-static void sirfsoc_pwrc_toggle_interrupts(struct sirfsoc_pwrc_drvdata *pwrcdrv,
- bool enable)
-{
- u32 int_mask;
-
- int_mask = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base + PWRC_INT_MASK);
- if (enable)
- int_mask |= PWRC_ON_KEY_BIT;
- else
- int_mask &= ~PWRC_ON_KEY_BIT;
- sirfsoc_rtc_iobrg_writel(int_mask, pwrcdrv->pwrc_base + PWRC_INT_MASK);
-}
-
-static int sirfsoc_pwrc_open(struct input_dev *input)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = input_get_drvdata(input);
-
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, true);
-
- return 0;
-}
-
-static void sirfsoc_pwrc_close(struct input_dev *input)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = input_get_drvdata(input);
-
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, false);
- cancel_delayed_work_sync(&pwrcdrv->work);
-}
-
-static const struct of_device_id sirfsoc_pwrc_of_match[] = {
- { .compatible = "sirf,prima2-pwrc" },
- {},
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
-
-static int sirfsoc_pwrc_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct sirfsoc_pwrc_drvdata *pwrcdrv;
- int irq;
- int error;
-
- pwrcdrv = devm_kzalloc(&pdev->dev, sizeof(struct sirfsoc_pwrc_drvdata),
- GFP_KERNEL);
- if (!pwrcdrv) {
- dev_info(&pdev->dev, "Not enough memory for the device data\n");
- return -ENOMEM;
- }
-
- /*
- * We can't use of_iomap because pwrc is not mapped in memory,
- * the so-called base address is only offset in rtciobrg
- */
- error = of_property_read_u32(np, "reg", &pwrcdrv->pwrc_base);
- if (error) {
- dev_err(&pdev->dev,
- "unable to find base address of pwrc node in dtb\n");
- return error;
- }
-
- pwrcdrv->input = devm_input_allocate_device(&pdev->dev);
- if (!pwrcdrv->input)
- return -ENOMEM;
-
- pwrcdrv->input->name = "sirfsoc pwrckey";
- pwrcdrv->input->phys = "pwrc/input0";
- pwrcdrv->input->evbit[0] = BIT_MASK(EV_KEY);
- input_set_capability(pwrcdrv->input, EV_KEY, KEY_POWER);
-
- INIT_DELAYED_WORK(&pwrcdrv->work, sirfsoc_pwrc_report_event);
-
- pwrcdrv->input->open = sirfsoc_pwrc_open;
- pwrcdrv->input->close = sirfsoc_pwrc_close;
-
- input_set_drvdata(pwrcdrv->input, pwrcdrv);
-
- /* Make sure the device is quiesced */
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, false);
-
- irq = platform_get_irq(pdev, 0);
- error = devm_request_irq(&pdev->dev, irq,
- sirfsoc_pwrc_isr, 0,
- "sirfsoc_pwrc_int", pwrcdrv);
- if (error) {
- dev_err(&pdev->dev, "unable to claim irq %d, error: %d\n",
- irq, error);
- return error;
- }
-
- error = input_register_device(pwrcdrv->input);
- if (error) {
- dev_err(&pdev->dev,
- "unable to register input device, error: %d\n",
- error);
- return error;
- }
-
- dev_set_drvdata(&pdev->dev, pwrcdrv);
- device_init_wakeup(&pdev->dev, 1);
-
- return 0;
-}
-
-static int __maybe_unused sirfsoc_pwrc_resume(struct device *dev)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_get_drvdata(dev);
- struct input_dev *input = pwrcdrv->input;
-
- /*
- * Do not mask pwrc interrupt as we want pwrc work as a wakeup source
- * if users touch X_ONKEY_B, see arch/arm/mach-prima2/pm.c
- */
- mutex_lock(&input->mutex);
- if (input_device_enabled(input))
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, true);
- mutex_unlock(&input->mutex);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(sirfsoc_pwrc_pm_ops, NULL, sirfsoc_pwrc_resume);
-
-static struct platform_driver sirfsoc_pwrc_driver = {
- .probe = sirfsoc_pwrc_probe,
- .driver = {
- .name = "sirfsoc-pwrc",
- .pm = &sirfsoc_pwrc_pm_ops,
- .of_match_table = sirfsoc_pwrc_of_match,
- }
-};
-
-module_platform_driver(sirfsoc_pwrc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
-MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
-MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index dc4a240f4489..3c84deefa327 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -55,8 +55,9 @@
#define ETP_FW_PAGE_SIZE_512 512
#define ETP_FW_SIGNATURE_SIZE 6
-#define ETP_PRODUCT_ID_DELBIN 0x00C2
+#define ETP_PRODUCT_ID_WHITEBOX 0x00B8
#define ETP_PRODUCT_ID_VOXEL 0x00BF
+#define ETP_PRODUCT_ID_DELBIN 0x00C2
#define ETP_PRODUCT_ID_MAGPIE 0x0120
#define ETP_PRODUCT_ID_BOBBA 0x0121
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index dad22c1ea6a0..47af62c12267 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -105,6 +105,7 @@ static u32 elan_i2c_lookup_quirks(u16 ic_type, u16 product_id)
u32 quirks;
} elan_i2c_quirks[] = {
{ 0x0D, ETP_PRODUCT_ID_DELBIN, ETP_QUIRK_QUICK_WAKEUP },
+ { 0x0D, ETP_PRODUCT_ID_WHITEBOX, ETP_QUIRK_QUICK_WAKEUP },
{ 0x10, ETP_PRODUCT_ID_VOXEL, ETP_QUIRK_QUICK_WAKEUP },
{ 0x14, ETP_PRODUCT_ID_MAGPIE, ETP_QUIRK_QUICK_WAKEUP },
{ 0x14, ETP_PRODUCT_ID_BOBBA, ETP_QUIRK_QUICK_WAKEUP },
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 3ac57a91ede4..51b68501896c 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -220,16 +220,4 @@ static struct parport_driver parkbd_parport_driver = {
.detach = parkbd_detach,
.devmodel = true,
};
-
-static int __init parkbd_init(void)
-{
- return parport_register_driver(&parkbd_parport_driver);
-}
-
-static void __exit parkbd_exit(void)
-{
- parport_unregister_driver(&parkbd_parport_driver);
-}
-
-module_init(parkbd_init);
-module_exit(parkbd_exit);
+module_parport_driver(parkbd_parport_driver);
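
Note on the parkbd.c hunk above: module_parport_driver() generates exactly the registration boilerplate deleted here, in the mold of module_platform_driver() and friends. In effect the macro expands to:

	/* approximate expansion of module_parport_driver(parkbd_parport_driver) */
	static int __init parkbd_parport_driver_init(void)
	{
		return parport_register_driver(&parkbd_parport_driver);
	}
	module_init(parkbd_parport_driver_init);

	static void __exit parkbd_parport_driver_exit(void)
	{
		parport_unregister_driver(&parkbd_parport_driver);
	}
	module_exit(parkbd_parport_driver_exit);
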
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index ad454cd2855a..d4e74738c5a8 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -932,7 +932,7 @@ config TOUCHSCREEN_USB_COMPOSITE
- JASTEC USB Touch Controller/DigiTech DTR-02U
- Zytronic controllers
- Elo TouchSystems 2700 IntelliTouch
- - EasyTouch USB Touch Controller from Data Modul
+ - EasyTouch USB Touch Controller from Data Module
- e2i (Mimo monitors)
Have a look at <http://linux.chapter7.ch/touchkit/> for
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 263de3bfb6cd..bb2e1cbffba7 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -899,6 +899,7 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
* the identification registers.
*/
switch (rdbuf[0]) {
+ case 0x11: /* EDT EP0110M09 */
case 0x35: /* EDT EP0350M09 */
case 0x43: /* EDT EP0430M09 */
case 0x50: /* EDT EP0500M09 */
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 0efd1a1bb192..9fa3b0e421be 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -54,6 +54,7 @@
enum mms_type {
TYPE_MMS114 = 114,
+ TYPE_MMS134S = 134,
TYPE_MMS136 = 136,
TYPE_MMS152 = 152,
TYPE_MMS345L = 345,
@@ -212,7 +213,7 @@ static irqreturn_t mms114_interrupt(int irq, void *dev_id)
goto out;
/* MMS136 has slightly different event size */
- if (data->type == TYPE_MMS136)
+ if (data->type == TYPE_MMS134S || data->type == TYPE_MMS136)
touch_size = packet_size / MMS136_EVENT_SIZE;
else
touch_size = packet_size / MMS114_EVENT_SIZE;
@@ -281,6 +282,7 @@ static int mms114_get_version(struct mms114_data *data)
break;
case TYPE_MMS114:
+ case TYPE_MMS134S:
case TYPE_MMS136:
error = __mms114_read_reg(data, MMS114_TSP_REV, 6, buf);
if (error)
@@ -304,8 +306,9 @@ static int mms114_setup_regs(struct mms114_data *data)
if (error < 0)
return error;
- /* Only MMS114 and MMS136 have configuration and power on registers */
- if (data->type != TYPE_MMS114 && data->type != TYPE_MMS136)
+ /* MMS114, MMS134S and MMS136 have configuration and power on registers */
+ if (data->type != TYPE_MMS114 && data->type != TYPE_MMS134S &&
+ data->type != TYPE_MMS136)
return 0;
error = mms114_set_active(data, true);
@@ -487,7 +490,8 @@ static int mms114_probe(struct i2c_client *client,
0, data->props.max_y, 0, 0);
}
- if (data->type == TYPE_MMS114 || data->type == TYPE_MMS136) {
+ if (data->type == TYPE_MMS114 || data->type == TYPE_MMS134S ||
+ data->type == TYPE_MMS136) {
/*
* The firmware handles movement and pressure fuzz, so
* don't duplicate that in software.
@@ -612,6 +616,9 @@ static const struct of_device_id mms114_dt_match[] = {
.compatible = "melfas,mms114",
.data = (void *)TYPE_MMS114,
}, {
+ .compatible = "melfas,mms134s",
+ .data = (void *)TYPE_MMS134S,
+ }, {
.compatible = "melfas,mms136",
.data = (void *)TYPE_MMS136,
}, {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 8ad8618b3530..124c41adeca1 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -82,7 +82,7 @@ config IOMMU_DEBUGFS
choice
prompt "IOMMU default domain type"
depends on IOMMU_API
- default IOMMU_DEFAULT_DMA_LAZY if AMD_IOMMU || INTEL_IOMMU
+ default IOMMU_DEFAULT_DMA_LAZY if X86 || IA64
default IOMMU_DEFAULT_DMA_STRICT
help
Choose the type of IOMMU domain used to manage DMA API usage by
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index bdcf167b4afe..2a822b229bd0 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -297,6 +297,22 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
+#ifdef CONFIG_IRQ_REMAP
+static bool check_feature_on_all_iommus(u64 mask)
+{
+ bool ret = false;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ ret = iommu_feature(iommu, mask);
+ if (!ret)
+ return false;
+ }
+
+ return true;
+}
+#endif
+
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
* Default to IVHD EFR since it is available sooner
@@ -813,9 +829,9 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
return 0;
}
-#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
+#ifdef CONFIG_IRQ_REMAP
u64 entry;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
@@ -845,25 +861,9 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
err_out:
free_ga_log(iommu);
return -EINVAL;
-}
-#endif /* CONFIG_IRQ_REMAP */
-
-static int iommu_init_ga(struct amd_iommu *iommu)
-{
- int ret = 0;
-
-#ifdef CONFIG_IRQ_REMAP
- /* Note: We have already checked GASup from IVRS table.
- * Now, we need to make sure that GAMSup is set.
- */
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
- !iommu_feature(iommu, FEATURE_GAM_VAPIC))
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-
- ret = iommu_init_ga_log(iommu);
+#else
+ return 0;
#endif /* CONFIG_IRQ_REMAP */
-
- return ret;
}
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
@@ -1845,7 +1845,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
- ret = iommu_init_ga(iommu);
+ ret = iommu_init_ga_log(iommu);
if (ret)
return ret;
@@ -2479,6 +2479,14 @@ static void early_enable_iommus(void)
}
#ifdef CONFIG_IRQ_REMAP
+ /*
+ * Note: We have already checked GASup from IVRS table.
+ * Now, we need to make sure that GAMSup is set.
+ */
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 2014fe8695ac..0c228787704f 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -514,9 +514,6 @@ static void load_pasid(struct mm_struct *mm, u32 pasid)
{
mutex_lock(&mm->context.lock);
- /* Synchronize with READ_ONCE in update_pasid(). */
- smp_store_release(&mm->pasid, pasid);
-
/* Update PASID MSR on all CPUs running the mm's tasks. */
on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);
@@ -792,7 +789,19 @@ prq_retry:
goto prq_retry;
}
+ /*
+ * A work in IO page fault workqueue may try to lock pasid_mutex now.
+ * Holding pasid_mutex while waiting in iopf_queue_flush_dev() for
+ * all works in the workqueue to finish may cause deadlock.
+ *
+ * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
+ * Unlock it to allow the works to be handled while waiting for
+ * them to finish.
+ */
+ lockdep_assert_held(&pasid_mutex);
+ mutex_unlock(&pasid_mutex);
iopf_queue_flush_dev(dev);
+ mutex_lock(&pasid_mutex);
/*
* Perform steps described in VT-d spec CH7.10 to drain page
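
Note on the svm.c hunk above: the added unlock/relock drops pasid_mutex before waiting for the I/O page-fault workqueue to drain, because a queued fault handler may itself need pasid_mutex; waiting for it while holding the mutex is a classic AB/BA deadlock. The shape of the fix, with generic names (flush_workqueue() standing in for iopf_queue_flush_dev()):

	lockdep_assert_held(&lock);	/* document the caller's locking */
	mutex_unlock(&lock);		/* let pending work items take it */
	flush_workqueue(wq);		/* now safe to wait for them */
	mutex_lock(&lock);		/* restore the invariant for callers */
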
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 0af42fb93a49..9e8bc802ac05 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -519,6 +519,7 @@ retry:
return new_iova->pfn_lo;
}
+EXPORT_SYMBOL_GPL(alloc_iova_fast);
/**
* free_iova_fast - free iova pfn range into rcache
@@ -536,6 +537,7 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
free_iova(iovad, pfn);
}
+EXPORT_SYMBOL_GPL(free_iova_fast);
#define fq_ring_for_each(i, fq) \
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 94fb63a7b357..fe63d5ee201b 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -570,7 +570,7 @@ fail_msg_node:
fail_db_node:
of_node_put(smu->db_node);
fail_bootmem:
- memblock_free(__pa(smu), sizeof(struct smu_device));
+ memblock_free_ptr(smu, sizeof(struct smu_device));
smu = NULL;
fail_np:
of_node_put(np);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b03eabc1ed7c..2111daaacaba 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -809,14 +809,9 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- int blocksize = *(int *) data, id;
- bool rc;
+ int blocksize = *(int *) data;
- id = dax_read_lock();
- rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
- dax_read_unlock(id);
-
- return rc;
+ return !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
}
/* Check devices support synchronous DAX */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 84e9145b1714..a011d09cb0fa 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -654,7 +654,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev);
return 0;
}
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index a3ce5500d355..0f08c05333ea 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/units.h>
#include <media/media-entity.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
@@ -64,7 +65,6 @@
/* Test pattern control */
#define OV02A10_REG_TEST_PATTERN 0xb6
-#define HZ_PER_MHZ 1000000L
#define OV02A10_LINK_FREQ_390MHZ (390 * HZ_PER_MHZ)
#define OV02A10_ECLK_FREQ (24 * HZ_PER_MHZ)
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index 5d8b48288cf4..6ebe3c7001ff 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -10,4 +10,5 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/asid.o common/habanalabs_ioctl.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
- common/command_submission.o common/firmware_if.o
+ common/command_submission.o common/firmware_if.o \
+ common/state_dump.o
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 719168c980a4..8132a84698d5 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -314,8 +314,6 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
spin_lock(&mgr->cb_lock);
rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
- if (rc < 0)
- rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_KERNEL);
spin_unlock(&mgr->cb_lock);
if (rc < 0) {
@@ -552,7 +550,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
vma->vm_private_data = cb;
- rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
+ rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
cb->bus_address, cb->size);
if (rc) {
spin_lock(&cb->lock);
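
Note on the command_buffer.c hunk above: the deleted fallback retried idr_alloc() with GFP_KERNEL while cb_lock, a spinlock, was still held; GFP_KERNEL may sleep, which is illegal in atomic context, so only non-sleeping flags are valid there:

	spin_lock(&mgr->lock);
	/* may not sleep under a spinlock: GFP_ATOMIC only */
	id = idr_alloc(&mgr->handles, obj, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->lock);
	if (id < 0)
		return id;	/* no sleeping retry while locked */
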
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 80c60fb41bbc..7b0516cf808b 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -38,7 +38,11 @@ static void hl_sob_reset(struct kref *ref)
kref);
struct hl_device *hdev = hw_sob->hdev;
+ dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
+
hdev->asic_funcs->reset_sob(hdev, hw_sob);
+
+ hw_sob->need_reset = false;
}
void hl_sob_reset_error(struct kref *ref)
@@ -52,6 +56,24 @@ void hl_sob_reset_error(struct kref *ref)
hw_sob->q_idx, hw_sob->sob_id);
}
+void hw_sob_put(struct hl_hw_sob *hw_sob)
+{
+ if (hw_sob)
+ kref_put(&hw_sob->kref, hl_sob_reset);
+}
+
+static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
+{
+ if (hw_sob)
+ kref_put(&hw_sob->kref, hl_sob_reset_error);
+}
+
+void hw_sob_get(struct hl_hw_sob *hw_sob)
+{
+ if (hw_sob)
+ kref_get(&hw_sob->kref);
+}
+
/**
* hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
* @sob_base: sob base id
@@ -84,76 +106,29 @@ int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
return 0;
}
-static void sob_reset_work(struct work_struct *work)
-{
- struct hl_cs_compl *hl_cs_cmpl =
- container_of(work, struct hl_cs_compl, sob_reset_work);
- struct hl_device *hdev = hl_cs_cmpl->hdev;
-
- /*
- * A signal CS can get completion while the corresponding wait
- * for signal CS is on its way to the PQ. The wait for signal CS
- * will get stuck if the signal CS incremented the SOB to its
- * max value and there are no pending (submitted) waits on this
- * SOB.
- * We do the following to void this situation:
- * 1. The wait for signal CS must get a ref for the signal CS as
- * soon as possible in cs_ioctl_signal_wait() and put it
- * before being submitted to the PQ but after it incremented
- * the SOB refcnt in init_signal_wait_cs().
- * 2. Signal/Wait for signal CS will decrement the SOB refcnt
- * here.
- * These two measures guarantee that the wait for signal CS will
- * reset the SOB upon completion rather than the signal CS and
- * hence the above scenario is avoided.
- */
- kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
-
- if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
- hdev->asic_funcs->reset_sob_group(hdev,
- hl_cs_cmpl->sob_group);
-
- kfree(hl_cs_cmpl);
-}
-
static void hl_fence_release(struct kref *kref)
{
struct hl_fence *fence =
container_of(kref, struct hl_fence, refcount);
struct hl_cs_compl *hl_cs_cmpl =
container_of(fence, struct hl_cs_compl, base_fence);
- struct hl_device *hdev = hl_cs_cmpl->hdev;
- /* EBUSY means the CS was never submitted and hence we don't have
- * an attached hw_sob object that we should handle here
- */
- if (fence->error == -EBUSY)
- goto free;
-
- if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
- (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
- (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) {
-
- dev_dbg(hdev->dev,
- "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
- hl_cs_cmpl->cs_seq,
- hl_cs_cmpl->type,
- hl_cs_cmpl->hw_sob->sob_id,
- hl_cs_cmpl->sob_val);
-
- queue_work(hdev->sob_reset_wq, &hl_cs_cmpl->sob_reset_work);
-
- return;
- }
-
-free:
kfree(hl_cs_cmpl);
}
void hl_fence_put(struct hl_fence *fence)
{
- if (fence)
- kref_put(&fence->refcount, hl_fence_release);
+ if (IS_ERR_OR_NULL(fence))
+ return;
+ kref_put(&fence->refcount, hl_fence_release);
+}
+
+void hl_fences_put(struct hl_fence **fence, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++, fence++)
+ hl_fence_put(*fence);
}
void hl_fence_get(struct hl_fence *fence)
@@ -473,11 +448,139 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
spin_unlock(&hdev->cs_mirror_lock);
}
+/*
+ * force_complete_multi_cs - complete all contexts that wait on multi-CS
+ *
+ * @hdev: pointer to habanalabs device structure
+ */
+static void force_complete_multi_cs(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ struct multi_cs_completion *mcs_compl;
+
+ mcs_compl = &hdev->multi_cs_completion[i];
+
+ spin_lock(&mcs_compl->lock);
+
+ if (!mcs_compl->used) {
+ spin_unlock(&mcs_compl->lock);
+ continue;
+ }
+
+		/* When calling force complete, no context should be waiting on
+		 * multi-CS.
+		 * We call the function as a protection for such a case, to
+		 * free any pending context and print an error message.
+ */
+ dev_err(hdev->dev,
+ "multi-CS completion context %d still waiting when calling force completion\n",
+ i);
+ complete_all(&mcs_compl->completion);
+ spin_unlock(&mcs_compl->lock);
+ }
+}
+
+/*
+ * complete_multi_cs - complete all waiting entities on multi-CS
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @cs: CS structure
+ * The function signals a waiting entity that has overlapping stream masters
+ * with the completed CS.
+ * For example:
+ * - a completed CS worked on stream master QID 4, multi CS completion
+ * is actively waiting on stream master QIDs 3, 5. don't send signal as no
+ * common stream master QID
+ * - a completed CS worked on stream master QID 4, multi CS completion
+ * is actively waiting on stream master QIDs 3, 4. send signal as stream
+ * master QID 4 is common
+ */
+static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
+{
+ struct hl_fence *fence = cs->fence;
+ int i;
+
+ /* in case of multi CS check for completion only for the first CS */
+ if (cs->staged_cs && !cs->staged_first)
+ return;
+
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ struct multi_cs_completion *mcs_compl;
+
+ mcs_compl = &hdev->multi_cs_completion[i];
+ if (!mcs_compl->used)
+ continue;
+
+ spin_lock(&mcs_compl->lock);
+
+ /*
+ * complete if:
+ * 1. still waiting for completion
+ * 2. the completed CS has at least one overlapping stream
+ * master with the stream masters in the completion
+ */
+ if (mcs_compl->used &&
+ (fence->stream_master_qid_map &
+ mcs_compl->stream_master_qid_map)) {
+ /* extract the timestamp only of first completed CS */
+ if (!mcs_compl->timestamp)
+ mcs_compl->timestamp =
+ ktime_to_ns(fence->timestamp);
+ complete_all(&mcs_compl->completion);
+ }
+
+ spin_unlock(&mcs_compl->lock);
+ }
+}
+
+static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
+ struct hl_cs *cs,
+ struct hl_cs_compl *hl_cs_cmpl)
+{
+ /* Skip this handler if the cs wasn't submitted, to avoid putting
+	 * the hw_sob twice, since this case is already handled at this point;
+ * also skip if the hw_sob pointer wasn't set.
+ */
+ if (!hl_cs_cmpl->hw_sob || !cs->submitted)
+ return;
+
+ spin_lock(&hl_cs_cmpl->lock);
+
+ /*
+ * we get refcount upon reservation of signals or signal/wait cs for the
+ * hw_sob object, and need to put it when the first staged cs
+	 * (which contains the encaps signals) or cs signal/wait is completed.
+ */
+ if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
+ (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
+ (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
+ (!!hl_cs_cmpl->encaps_signals)) {
+ dev_dbg(hdev->dev,
+ "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
+ hl_cs_cmpl->cs_seq,
+ hl_cs_cmpl->type,
+ hl_cs_cmpl->hw_sob->sob_id,
+ hl_cs_cmpl->sob_val);
+
+ hw_sob_put(hl_cs_cmpl->hw_sob);
+
+ if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
+ hdev->asic_funcs->reset_sob_group(hdev,
+ hl_cs_cmpl->sob_group);
+ }
+
+ spin_unlock(&hl_cs_cmpl->lock);
+}
+
static void cs_do_release(struct kref *ref)
{
struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
struct hl_device *hdev = cs->ctx->hdev;
struct hl_cs_job *job, *tmp;
+ struct hl_cs_compl *hl_cs_cmpl =
+ container_of(cs->fence, struct hl_cs_compl, base_fence);
cs->completed = true;
@@ -493,8 +596,9 @@ static void cs_do_release(struct kref *ref)
complete_job(hdev, job);
if (!cs->submitted) {
- /* In case the wait for signal CS was submitted, the put occurs
- * in init_signal_wait_cs() or collective_wait_init_cs()
+ /*
+ * In case the wait for signal CS was submitted, the fence put
+ * occurs in init_signal_wait_cs() or collective_wait_init_cs()
* right before hanging on the PQ.
*/
if (cs->type == CS_TYPE_WAIT ||
@@ -535,8 +639,20 @@ static void cs_do_release(struct kref *ref)
list_del(&cs->staged_cs_node);
spin_unlock(&hdev->cs_mirror_lock);
}
+
+ /* decrement refcount to handle when first staged cs
+ * with encaps signals is completed.
+ */
+ if (hl_cs_cmpl->encaps_signals)
+ kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
+ hl_encaps_handle_do_release);
}
+ if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
+ && cs->encaps_signals)
+ kref_put(&cs->encaps_sig_hdl->refcount,
+ hl_encaps_handle_do_release);
+
out:
/* Must be called before hl_ctx_put because inside we use ctx to get
* the device
@@ -566,6 +682,10 @@ out:
if (cs->timestamp)
cs->fence->timestamp = ktime_get();
complete_all(&cs->fence->completion);
+ complete_multi_cs(hdev, cs);
+
+ cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
+
hl_fence_put(cs->fence);
kfree(cs->jobs_in_queue_cnt);
@@ -621,6 +741,10 @@ static void cs_timedout(struct work_struct *work)
break;
}
+ rc = hl_state_dump(hdev);
+ if (rc)
+ dev_err(hdev->dev, "Error during system state dump %d\n", rc);
+
cs_put(cs);
if (likely(!skip_reset_on_timeout)) {
@@ -661,6 +785,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->completed = false;
cs->type = cs_type;
cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
+ cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
cs->timeout_jiffies = timeout;
cs->skip_reset_on_timeout =
hdev->skip_reset_on_timeout ||
@@ -671,9 +796,9 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
kref_init(&cs->refcount);
spin_lock_init(&cs->job_lock);
- cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
+ cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl)
- cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_KERNEL);
+ cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
if (!cs_cmpl) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
@@ -698,7 +823,6 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl->hdev = hdev;
cs_cmpl->type = cs->type;
spin_lock_init(&cs_cmpl->lock);
- INIT_WORK(&cs_cmpl->sob_reset_work, sob_reset_work);
cs->fence = &cs_cmpl->base_fence;
spin_lock(&ctx->cs_lock);
@@ -791,31 +915,22 @@ void hl_cs_rollback_all(struct hl_device *hdev)
cs_rollback(hdev, cs);
cs_put(cs);
}
-}
-
-void hl_pending_cb_list_flush(struct hl_ctx *ctx)
-{
- struct hl_pending_cb *pending_cb, *tmp;
- list_for_each_entry_safe(pending_cb, tmp,
- &ctx->pending_cb_list, cb_node) {
- list_del(&pending_cb->cb_node);
- hl_cb_put(pending_cb->cb);
- kfree(pending_cb);
- }
+ force_complete_multi_cs(hdev);
}
static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
struct hl_user_pending_interrupt *pend;
+ unsigned long flags;
- spin_lock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
pend->fence.error = -EIO;
complete_all(&pend->fence.completion);
}
- spin_unlock(&interrupt->wait_list_lock);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
}
void hl_release_pending_user_interrupts(struct hl_device *hdev)
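
Note on the hunk below: converting spin_lock() to spin_lock_irqsave() matters when the same lock is also taken from hard-IRQ context; a process-context holder must disable local interrupts, or an interrupt arriving on the same CPU could spin on the lock it already owns and deadlock. Canonical form:

	unsigned long flags;

	spin_lock_irqsave(&interrupt->wait_list_lock, flags);	/* local IRQs off */
	/* ... walk the list shared with the interrupt handler ... */
	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
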
@@ -981,6 +1096,10 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
return CS_TYPE_WAIT;
else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
return CS_TYPE_COLLECTIVE_WAIT;
+ else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
+ return CS_RESERVE_SIGNALS;
+ else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
+ return CS_UNRESERVE_SIGNALS;
else
return CS_TYPE_DEFAULT;
}
@@ -1081,7 +1200,8 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
}
static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
- u64 sequence, u32 flags)
+ u64 sequence, u32 flags,
+ u32 encaps_signal_handle)
{
if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
return 0;
@@ -1093,6 +1213,9 @@ static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
/* Staged CS sequence is the first CS sequence */
INIT_LIST_HEAD(&cs->staged_cs_node);
cs->staged_sequence = cs->sequence;
+
+ if (cs->encaps_signals)
+ cs->encaps_sig_hdl_id = encaps_signal_handle;
} else {
/* User sequence will be validated in 'hl_hw_queue_schedule_cs'
* under the cs_mirror_lock
@@ -1108,9 +1231,20 @@ static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
return 0;
}
+static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
+{
+ int i;
+
+ for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
+ if (qid == hdev->stream_master_qid_arr[i])
+ return BIT(i);
+
+ return 0;
+}
+
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
u32 num_chunks, u64 *cs_seq, u32 flags,
- u32 timeout)
+ u32 encaps_signals_handle, u32 timeout)
{
bool staged_mid, int_queues_only = true;
struct hl_device *hdev = hpriv->hdev;
@@ -1121,6 +1255,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cs *cs;
struct hl_cb *cb;
u64 user_sequence;
+ u8 stream_master_qid_map = 0;
int rc, i;
cntr = &hdev->aggregated_cs_counters;
@@ -1148,7 +1283,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
hl_debugfs_add_cs(cs);
- rc = cs_staged_submission(hdev, cs, user_sequence, flags);
+ rc = cs_staged_submission(hdev, cs, user_sequence, flags,
+ encaps_signals_handle);
if (rc)
goto free_cs_object;
@@ -1179,9 +1315,20 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
}
- if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
+ if (queue_type == QUEUE_TYPE_EXT ||
+ queue_type == QUEUE_TYPE_HW) {
int_queues_only = false;
+ /*
+			 * store which streams are being used for external/HW
+ * queues of this CS
+ */
+ if (hdev->supports_wait_for_multi_cs)
+ stream_master_qid_map |=
+ get_stream_master_qid_mask(hdev,
+ chunk->queue_index);
+ }
+
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
@@ -1242,6 +1389,13 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
goto free_cs_object;
}
+ /*
+ * store the (external/HW queues) streams used by the CS in the
+ * fence object for multi-CS completion
+ */
+ if (hdev->supports_wait_for_multi_cs)
+ cs->fence->stream_master_qid_map = stream_master_qid_map;
+
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
if (rc != -EAGAIN)
@@ -1270,130 +1424,6 @@ out:
return rc;
}
-static int pending_cb_create_job(struct hl_device *hdev, struct hl_ctx *ctx,
- struct hl_cs *cs, struct hl_cb *cb, u32 size, u32 hw_queue_id)
-{
- struct hw_queue_properties *hw_queue_prop;
- struct hl_cs_counters_atomic *cntr;
- struct hl_cs_job *job;
-
- hw_queue_prop = &hdev->asic_prop.hw_queues_props[hw_queue_id];
- cntr = &hdev->aggregated_cs_counters;
-
- job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);
- if (!job) {
- atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
- atomic64_inc(&cntr->out_of_mem_drop_cnt);
- dev_err(hdev->dev, "Failed to allocate a new job\n");
- return -ENOMEM;
- }
-
- job->id = 0;
- job->cs = cs;
- job->user_cb = cb;
- atomic_inc(&job->user_cb->cs_cnt);
- job->user_cb_size = size;
- job->hw_queue_id = hw_queue_id;
- job->patched_cb = job->user_cb;
- job->job_cb_size = job->user_cb_size;
-
- /* increment refcount as for external queues we get completion */
- cs_get(cs);
-
- cs->jobs_in_queue_cnt[job->hw_queue_id]++;
-
- list_add_tail(&job->cs_node, &cs->job_list);
-
- hl_debugfs_add_job(hdev, job);
-
- return 0;
-}
-
-static int hl_submit_pending_cb(struct hl_fpriv *hpriv)
-{
- struct hl_device *hdev = hpriv->hdev;
- struct hl_ctx *ctx = hpriv->ctx;
- struct hl_pending_cb *pending_cb, *tmp;
- struct list_head local_cb_list;
- struct hl_cs *cs;
- struct hl_cb *cb;
- u32 hw_queue_id;
- u32 cb_size;
- int process_list, rc = 0;
-
- if (list_empty(&ctx->pending_cb_list))
- return 0;
-
- process_list = atomic_cmpxchg(&ctx->thread_pending_cb_token, 1, 0);
-
- /* Only a single thread is allowed to process the list */
- if (!process_list)
- return 0;
-
- if (list_empty(&ctx->pending_cb_list))
- goto free_pending_cb_token;
-
- /* move all list elements to a local list */
- INIT_LIST_HEAD(&local_cb_list);
- spin_lock(&ctx->pending_cb_lock);
- list_for_each_entry_safe(pending_cb, tmp, &ctx->pending_cb_list,
- cb_node)
- list_move_tail(&pending_cb->cb_node, &local_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-
- rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs, 0,
- hdev->timeout_jiffies);
- if (rc)
- goto add_list_elements;
-
- hl_debugfs_add_cs(cs);
-
- /* Iterate through pending cb list, create jobs and add to CS */
- list_for_each_entry(pending_cb, &local_cb_list, cb_node) {
- cb = pending_cb->cb;
- cb_size = pending_cb->cb_size;
- hw_queue_id = pending_cb->hw_queue_id;
-
- rc = pending_cb_create_job(hdev, ctx, cs, cb, cb_size,
- hw_queue_id);
- if (rc)
- goto free_cs_object;
- }
-
- rc = hl_hw_queue_schedule_cs(cs);
- if (rc) {
- if (rc != -EAGAIN)
- dev_err(hdev->dev,
- "Failed to submit CS %d.%llu (%d)\n",
- ctx->asid, cs->sequence, rc);
- goto free_cs_object;
- }
-
- /* pending cb was scheduled successfully */
- list_for_each_entry_safe(pending_cb, tmp, &local_cb_list, cb_node) {
- list_del(&pending_cb->cb_node);
- kfree(pending_cb);
- }
-
- cs_put(cs);
-
- goto free_pending_cb_token;
-
-free_cs_object:
- cs_rollback(hdev, cs);
- cs_put(cs);
-add_list_elements:
- spin_lock(&ctx->pending_cb_lock);
- list_for_each_entry_safe_reverse(pending_cb, tmp, &local_cb_list,
- cb_node)
- list_move(&pending_cb->cb_node, &ctx->pending_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-free_pending_cb_token:
- atomic_set(&ctx->thread_pending_cb_token, 1);
-
- return rc;
-}
-
static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
u64 *cs_seq)
{
@@ -1443,7 +1473,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
rc = 0;
} else {
rc = cs_ioctl_default(hpriv, chunks, num_chunks,
- cs_seq, 0, hdev->timeout_jiffies);
+ cs_seq, 0, 0, hdev->timeout_jiffies);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -1501,10 +1531,17 @@ out:
 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
* if the SOB value reaches the max value move to the other SOB reserved
* to the queue.
+ * @hdev: pointer to device structure
+ * @q_idx: stream queue index
+ * @hw_sob: the H/W SOB used in this signal CS.
+ * @count: signals count
+ * @encaps_sig: tells whether it's reservation for encaps signals or not.
+ *
* Note that this function must be called while hw_queues_lock is taken.
*/
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
- struct hl_hw_sob **hw_sob, u32 count)
+ struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
{
struct hl_sync_stream_properties *prop;
struct hl_hw_sob *sob = *hw_sob, *other_sob;
@@ -1512,7 +1549,7 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
- kref_get(&sob->kref);
+ hw_sob_get(sob);
/* check for wraparound */
if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
@@ -1522,7 +1559,7 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
* just incremented the refcount right before calling this
* function.
*/
- kref_put(&sob->kref, hl_sob_reset_error);
+ hw_sob_put_err(sob);
/*
* check the other sob value, if it still in use then fail
@@ -1537,12 +1574,42 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
return -EINVAL;
}
- prop->next_sob_val = 1;
+ /*
+ * next_sob_val always points to the next available signal
+ * in the sob, so in encaps signals it will be the next one
+ * after reserving the required amount.
+ */
+ if (encaps_sig)
+ prop->next_sob_val = count + 1;
+ else
+ prop->next_sob_val = count;
/* only two SOBs are currently in use */
prop->curr_sob_offset = other_sob_offset;
*hw_sob = other_sob;
+ /*
+ * check if other_sob needs reset, then do it before using it
+ * for the reservation or the next signal cs.
+ * we do it here, and for both encaps and regular signal cs
+ * cases in order to avoid possible races of two kref_put
+ * of the sob which can occur at the same time if we move the
+ * sob reset(kref_put) to cs_do_release function.
+ * in addition, if we have combination of cs signal and
+ * encaps, and at the point we need to reset the sob there was
+ * no more reservations and only signal cs keep coming,
+ * in such case we need signal_cs to put the refcount and
+ * reset the sob.
+ */
+ if (other_sob->need_reset)
+ hw_sob_put(other_sob);
+
+ if (encaps_sig) {
+ /* set reset indication for the sob */
+ sob->need_reset = true;
+ hw_sob_get(other_sob);
+ }
+
dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
prop->curr_sob_offset, q_idx);
} else {
@@ -1553,12 +1620,18 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
}
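
For readers tracing the wraparound logic above, a minimal standalone sketch of the two-SOB ping-pong that hl_cs_signal_sob_wraparound_handler implements. It is illustrative only: HL_MAX_SOB_VAL is assumed to be a 15-bit limit, the struct is a stand-in for hl_sync_stream_properties, and the refcounting on both SOBs is omitted.

    /* Illustrative model only, not driver code. */
    #include <stdbool.h>

    #define HL_MAX_SOB_VAL (1 << 15) /* assumed SOB counter limit */

    struct sob_model {
        unsigned int next_sob_val;    /* next free signal in current SOB */
        unsigned int curr_sob_offset; /* 0 or 1: which of the two SOBs */
    };

    /* Returns true if the reservation forced a switch to the spare SOB */
    static bool reserve_signals(struct sob_model *s, unsigned int count,
                                bool encaps_sig)
    {
        if (s->next_sob_val + count >= HL_MAX_SOB_VAL) {
            /* flip to the spare SOB and restart its counter; for
             * encaps signals, point one past the reserved range
             */
            s->curr_sob_offset ^= 1;
            s->next_sob_val = encaps_sig ? count + 1 : count;
            return true;
        }
        s->next_sob_val += count;
        return false;
    }
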
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
- struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
+ struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
+ bool encaps_signals)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
int rc = 0;
+ if (encaps_signals) {
+ *signal_seq = chunk->encaps_signal_seq;
+ return 0;
+ }
+
signal_seq_arr_len = chunk->num_signal_seq_arr;
/* currently only one signal seq is supported */
@@ -1583,7 +1656,7 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
return -ENOMEM;
}
- size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
+ size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
@@ -1605,8 +1678,8 @@ out:
}
static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type,
- u32 q_idx)
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
{
struct hl_cs_counters_atomic *cntr;
struct hl_cs_job *job;
@@ -1644,6 +1717,9 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
job->user_cb_size = cb_size;
job->hw_queue_id = q_idx;
+ if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
+ && cs->encaps_signals)
+ job->encaps_sig_wait_offset = encaps_signal_offset;
/*
* No need in parsing, user CB is the patched CB.
* We call hl_cb_destroy() out of two reasons - we don't need the CB in
@@ -1666,11 +1742,196 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
return 0;
}
+static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
+ u32 q_idx, u32 count,
+ u32 *handle_id, u32 *sob_addr,
+ u32 *signals_count)
+{
+ struct hw_queue_properties *hw_queue_prop;
+ struct hl_sync_stream_properties *prop;
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_cs_encaps_sig_handle *handle;
+ struct hl_encaps_signals_mgr *mgr;
+ struct hl_hw_sob *hw_sob;
+ int hdl_id;
+ int rc = 0;
+
+ if (count >= HL_MAX_SOB_VAL) {
+ dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
+ count);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (q_idx >= hdev->asic_prop.max_queues) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
+ q_idx);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
+
+ if (!hw_queue_prop->supports_sync_stream) {
+ dev_err(hdev->dev,
+ "Queue index %d does not support sync stream operations\n",
+ q_idx);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ handle->count = count;
+ mgr = &hpriv->ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
+ spin_unlock(&mgr->lock);
+
+ if (hdl_id < 0) {
+ dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ handle->id = hdl_id;
+ handle->q_idx = q_idx;
+ handle->hdev = hdev;
+ kref_init(&handle->refcount);
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ hw_sob = &prop->hw_sob[prop->curr_sob_offset];
+
+	/*
+	 * Increment the SOB value by the user-requested count to
+	 * reserve those signals. If the amount to reserve would exceed
+	 * the max SOB value, switch to the other SOB.
+	 */
+ rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
+ true);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to switch SOB\n");
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+ rc = -EINVAL;
+ goto remove_idr;
+ }
+	/* Set the hw_sob in the handle only after calling the SOB
+	 * wraparound handler, since the SOB may have changed.
+	 */
+ handle->hw_sob = hw_sob;
+
+	/* Store the pre-reservation SOB value, both for the unreserve
+	 * validity check and for signal offset support.
+	 */
+ handle->pre_sob_val = prop->next_sob_val - handle->count;
+
+ *signals_count = prop->next_sob_val;
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ *sob_addr = handle->hw_sob->sob_addr;
+ *handle_id = hdl_id;
+
+ dev_dbg(hdev->dev,
+ "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
+ hw_sob->sob_id, handle->hw_sob->sob_addr,
+ prop->next_sob_val - 1, q_idx, hdl_id);
+ goto out;
+
+remove_idr:
+ spin_lock(&mgr->lock);
+ idr_remove(&mgr->handles, hdl_id);
+ spin_unlock(&mgr->lock);
+
+ kfree(handle);
+out:
+ return rc;
+}
+
+static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
+{
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
+ struct hl_sync_stream_properties *prop;
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_encaps_signals_mgr *mgr;
+ struct hl_hw_sob *hw_sob;
+ u32 q_idx, sob_addr;
+ int rc = 0;
+
+ mgr = &hpriv->ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
+ if (encaps_sig_hdl) {
+ dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
+ handle_id, encaps_sig_hdl->hw_sob->sob_addr,
+ encaps_sig_hdl->count);
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ q_idx = encaps_sig_hdl->q_idx;
+ prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+ hw_sob = &prop->hw_sob[prop->curr_sob_offset];
+ sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
+
+		/* Check whether the SOB value went out of sync, either
+		 * due to other signal submissions handled between the
+		 * reserve and unreserve calls, or due to a SOB switch
+		 * upon reaching the max SOB value.
+		 */
+ if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
+ != prop->next_sob_val ||
+ sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
+			dev_err(hdev->dev, "Cannot unreserve signals, SOB val went out of sync, expected: %u, actual val: %u\n",
+ encaps_sig_hdl->pre_sob_val,
+ (prop->next_sob_val - encaps_sig_hdl->count));
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+ rc = -EINVAL;
+ goto out;
+ }
+
+		/*
+		 * Decrement the SOB value by the user-requested count
+		 * to unreserve those signals
+		 */
+ prop->next_sob_val -= encaps_sig_hdl->count;
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ hw_sob_put(hw_sob);
+
+ /* Release the id and free allocated memory of the handle */
+ idr_remove(&mgr->handles, handle_id);
+ kfree(encaps_sig_hdl);
+ } else {
+ rc = -EINVAL;
+		dev_err(hdev->dev, "failed to unreserve signals, cannot find handle\n");
+ }
+out:
+ spin_unlock(&mgr->lock);
+
+ return rc;
+}
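
A hypothetical userspace sketch of the reserve/unreserve flow served by the two functions above. The union fields match the ones this patch reads; the HL_CS_FLAGS_RESERVE_SIGNALS_ONLY / HL_CS_FLAGS_UNRESERVE_SIGNALS flag names and the installed header path are assumptions.

    /* Hypothetical userspace usage sketch; error handling trimmed. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h> /* assumed installed uAPI header path */

    static int reserve_encaps_signals(int fd, __u32 q_idx, __u32 count,
                                      __u32 *handle_id)
    {
        union hl_cs_args args;

        memset(&args, 0, sizeof(args));
        args.in.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY; /* assumed name */
        args.in.encaps_signals_q_idx = q_idx;
        args.in.encaps_signals_count = count;

        if (ioctl(fd, HL_IOCTL_CS, &args) || args.out.status)
            return -1;

        /* args.out.sob_base_addr_offset and args.out.count are also valid */
        *handle_id = args.out.handle_id;
        return 0;
    }

    static int unreserve_encaps_signals(int fd, __u32 handle_id)
    {
        union hl_cs_args args;

        memset(&args, 0, sizeof(args));
        args.in.cs_flags = HL_CS_FLAGS_UNRESERVE_SIGNALS; /* assumed name */
        args.in.encaps_sig_handle_id = handle_id;

        return ioctl(fd, HL_IOCTL_CS, &args) ? -1 : 0;
    }
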
+
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
void __user *chunks, u32 num_chunks,
u64 *cs_seq, u32 flags, u32 timeout)
{
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
+ bool handle_found = false, is_wait_cs = false,
+ wait_cs_submitted = false,
+ cs_encaps_signals = false;
struct hl_cs_chunk *cs_chunk_array, *chunk;
+ bool staged_cs_with_encaps_signals = false;
struct hw_queue_properties *hw_queue_prop;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
@@ -1730,11 +1991,58 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
collective_engine_id = chunk->collective_engine_id;
}
- if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
- rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
+ is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
+ cs_type == CS_TYPE_COLLECTIVE_WAIT);
+
+ cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
+
+ if (is_wait_cs) {
+ rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
+ ctx, cs_encaps_signals);
if (rc)
goto free_cs_chunk_array;
+ if (cs_encaps_signals) {
+			/* check if the CS sequence has an encapsulated
+			 * signals handle
+			 */
+ struct idr *idp;
+ u32 id;
+
+ spin_lock(&ctx->sig_mgr.lock);
+ idp = &ctx->sig_mgr.handles;
+ idr_for_each_entry(idp, encaps_sig_hdl, id) {
+ if (encaps_sig_hdl->cs_seq == signal_seq) {
+ handle_found = true;
+					/* Take a refcount to protect the
+					 * handle from IDR removal; needed when
+					 * multiple wait CS with offsets wait
+					 * on the same reserved encaps signals.
+					 */
+ kref_get(&encaps_sig_hdl->refcount);
+ break;
+ }
+ }
+ spin_unlock(&ctx->sig_mgr.lock);
+
+ if (!handle_found) {
+ dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
+ signal_seq);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
+ /* validate also the signal offset value */
+ if (chunk->encaps_signal_offset >
+ encaps_sig_hdl->count) {
+				dev_err(hdev->dev, "offset(%u) value exceeds max reserved signals count(%u)!\n",
+ chunk->encaps_signal_offset,
+ encaps_sig_hdl->count);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+ }
+
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
@@ -1755,11 +2063,16 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
sig_waitcs_cmpl =
container_of(sig_fence, struct hl_cs_compl, base_fence);
- if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ staged_cs_with_encaps_signals = !!
+ (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
+ (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
+
+ if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
+ !staged_cs_with_encaps_signals) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
- "CS seq 0x%llx is not of a signal CS\n",
+ "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
signal_seq);
hl_fence_put(sig_fence);
rc = -EINVAL;
@@ -1776,18 +2089,27 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
if (rc) {
- if (cs_type == CS_TYPE_WAIT ||
- cs_type == CS_TYPE_COLLECTIVE_WAIT)
+ if (is_wait_cs)
hl_fence_put(sig_fence);
+
goto free_cs_chunk_array;
}
/*
* Save the signal CS fence for later initialization right before
* hanging the wait CS on the queue.
+	 * For the encaps signals case, also save the CS sequence and handle
+	 * pointer for later initialization.
*/
- if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT)
+ if (is_wait_cs) {
cs->signal_fence = sig_fence;
+		/* Store the handle pointer, so we don't have to look it
+		 * up again later in the flow, when the SOB info must be
+		 * set in hw_queue.
+		 */
+ if (cs->encaps_signals)
+ cs->encaps_sig_hdl = encaps_sig_hdl;
+ }
hl_debugfs_add_cs(cs);
@@ -1795,10 +2117,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
- q_idx);
+ q_idx, chunk->encaps_signal_offset);
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
- cs, q_idx, collective_engine_id);
+ cs, q_idx, collective_engine_id,
+ chunk->encaps_signal_offset);
else {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
@@ -1810,7 +2133,13 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
- if (rc != -EAGAIN)
+		/* If a wait CS failed here, it means the signal CS has
+		 * already completed. We want to free all its related
+		 * objects, but we don't want to fail the ioctl.
+		 */
+ if (is_wait_cs)
+ rc = 0;
+ else if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to submit CS %d.%llu to H/W queues, error %d\n",
ctx->asid, cs->sequence, rc);
@@ -1818,6 +2147,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
rc = HL_CS_STATUS_SUCCESS;
+ if (is_wait_cs)
+ wait_cs_submitted = true;
goto put_cs;
free_cs_object:
@@ -1828,6 +2159,10 @@ put_cs:
/* We finished with the CS in this function, so put the ref */
cs_put(cs);
free_cs_chunk_array:
+ if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
+ is_wait_cs)
+ kref_put(&encaps_sig_hdl->refcount,
+ hl_encaps_handle_do_release);
kfree(cs_chunk_array);
out:
return rc;
@@ -1836,10 +2171,11 @@ out:
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cs_args *args = data;
- enum hl_cs_type cs_type;
+ enum hl_cs_type cs_type = 0;
u64 cs_seq = ULONG_MAX;
void __user *chunks;
- u32 num_chunks, flags, timeout;
+ u32 num_chunks, flags, timeout,
+ signals_count = 0, sob_addr = 0, handle_id = 0;
int rc;
rc = hl_cs_sanity_checks(hpriv, args);
@@ -1850,10 +2186,6 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
if (rc)
goto out;
- rc = hl_submit_pending_cb(hpriv);
- if (rc)
- goto out;
-
cs_type = hl_cs_get_cs_type(args->in.cs_flags &
~HL_CS_FLAGS_FORCE_RESTORE);
chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
@@ -1876,80 +2208,448 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
&cs_seq, args->in.cs_flags, timeout);
break;
+ case CS_RESERVE_SIGNALS:
+ rc = cs_ioctl_reserve_signals(hpriv,
+ args->in.encaps_signals_q_idx,
+ args->in.encaps_signals_count,
+ &handle_id, &sob_addr, &signals_count);
+ break;
+ case CS_UNRESERVE_SIGNALS:
+ rc = cs_ioctl_unreserve_signals(hpriv,
+ args->in.encaps_sig_handle_id);
+ break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
- args->in.cs_flags, timeout);
+ args->in.cs_flags,
+ args->in.encaps_sig_handle_id,
+ timeout);
break;
}
-
out:
if (rc != -EAGAIN) {
memset(args, 0, sizeof(*args));
+
+ if (cs_type == CS_RESERVE_SIGNALS) {
+ args->out.handle_id = handle_id;
+ args->out.sob_base_addr_offset = sob_addr;
+ args->out.count = signals_count;
+ } else {
+ args->out.seq = cs_seq;
+ }
args->out.status = rc;
- args->out.seq = cs_seq;
}
return rc;
}
+static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
+ enum hl_cs_wait_status *status, u64 timeout_us,
+ s64 *timestamp)
+{
+ struct hl_device *hdev = ctx->hdev;
+ long completion_rc;
+ int rc = 0;
+
+ if (IS_ERR(fence)) {
+ rc = PTR_ERR(fence);
+ if (rc == -EINVAL)
+ dev_notice_ratelimited(hdev->dev,
+ "Can't wait on CS %llu because current CS is at seq %llu\n",
+ seq, ctx->cs_sequence);
+ return rc;
+ }
+
+ if (!fence) {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ seq, ctx->cs_sequence);
+
+ *status = CS_WAIT_STATUS_GONE;
+ return 0;
+ }
+
+ if (!timeout_us) {
+ completion_rc = completion_done(&fence->completion);
+ } else {
+ unsigned long timeout;
+
+ timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
+ timeout_us : usecs_to_jiffies(timeout_us);
+ completion_rc =
+ wait_for_completion_interruptible_timeout(
+ &fence->completion, timeout);
+ }
+
+ if (completion_rc > 0) {
+ *status = CS_WAIT_STATUS_COMPLETED;
+ if (timestamp)
+ *timestamp = ktime_to_ns(fence->timestamp);
+ } else {
+ *status = CS_WAIT_STATUS_BUSY;
+ }
+
+ if (fence->error == -ETIMEDOUT)
+ rc = -ETIMEDOUT;
+ else if (fence->error == -EIO)
+ rc = -EIO;
+
+ return rc;
+}
+
+/*
+ * hl_cs_poll_fences - iterate CS fences to check for CS completion
+ *
+ * @mcs_data: multi-CS internal data
+ *
+ * @return 0 on success, otherwise non 0 error code
+ *
+ * The function iterates over all CS sequences in the list and sets a bit in
+ * completion_bitmap for each completed CS.
+ * While iterating, the function also extracts the stream master map to be
+ * used later by the waiting function.
+ * This function shall be called after taking a context ref.
+ */
+static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
+{
+ struct hl_fence **fence_ptr = mcs_data->fence_arr;
+ struct hl_device *hdev = mcs_data->ctx->hdev;
+ int i, rc, arr_len = mcs_data->arr_len;
+ u64 *seq_arr = mcs_data->seq_arr;
+ ktime_t max_ktime, first_cs_time;
+ enum hl_cs_wait_status status;
+
+ memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));
+
+ /* get all fences under the same lock */
+ rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
+ if (rc)
+ return rc;
+
+	/*
+	 * Initialize to the maximum time so timestamp validity can be
+	 * verified: if this value is still there at the end, no
+	 * timestamp was updated.
+	 */
+ max_ktime = ktime_set(KTIME_SEC_MAX, 0);
+ first_cs_time = max_ktime;
+
+ for (i = 0; i < arr_len; i++, fence_ptr++) {
+ struct hl_fence *fence = *fence_ptr;
+
+ /*
+ * function won't sleep as it is called with timeout 0 (i.e.
+ * poll the fence)
+ */
+ rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
+ &status, 0, NULL);
+ if (rc) {
+ dev_err(hdev->dev,
+				"wait_for_fence error: %d for CS seq %llu\n",
+ rc, seq_arr[i]);
+ break;
+ }
+
+ mcs_data->stream_master_qid_map |= fence->stream_master_qid_map;
+
+ if (status == CS_WAIT_STATUS_BUSY)
+ continue;
+
+ mcs_data->completion_bitmap |= BIT(i);
+
+		/*
+		 * Best effort to extract a timestamp. A few notes:
+		 * - if even a single fence is gone, we cannot extract a
+		 *   timestamp (as the fence no longer exists)
+		 * - for all completed CSs we take the earliest timestamp.
+		 *   For this we have to validate that:
+		 *   1. the given timestamp was indeed set
+		 *   2. the timestamp is the earliest of all timestamps
+		 *      so far
+		 */
+
+ if (status == CS_WAIT_STATUS_GONE) {
+ mcs_data->update_ts = false;
+ mcs_data->gone_cs = true;
+ } else if (mcs_data->update_ts &&
+ (ktime_compare(fence->timestamp,
+ ktime_set(0, 0)) > 0) &&
+ (ktime_compare(fence->timestamp, first_cs_time) < 0)) {
+ first_cs_time = fence->timestamp;
+ }
+ }
+
+ hl_fences_put(mcs_data->fence_arr, arr_len);
+
+ if (mcs_data->update_ts &&
+ (ktime_compare(first_cs_time, max_ktime) != 0))
+ mcs_data->timestamp = ktime_to_ns(first_cs_time);
+
+ return rc;
+}
+
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp)
{
struct hl_fence *fence;
- unsigned long timeout;
int rc = 0;
- long completion_rc;
if (timestamp)
*timestamp = 0;
- if (timeout_us == MAX_SCHEDULE_TIMEOUT)
- timeout = timeout_us;
- else
- timeout = usecs_to_jiffies(timeout_us);
-
hl_ctx_get(hdev, ctx);
fence = hl_ctx_get_fence(ctx, seq);
- if (IS_ERR(fence)) {
- rc = PTR_ERR(fence);
- if (rc == -EINVAL)
- dev_notice_ratelimited(hdev->dev,
- "Can't wait on CS %llu because current CS is at seq %llu\n",
- seq, ctx->cs_sequence);
- } else if (fence) {
- if (!timeout_us)
- completion_rc = completion_done(&fence->completion);
- else
- completion_rc =
- wait_for_completion_interruptible_timeout(
- &fence->completion, timeout);
- if (completion_rc > 0) {
- *status = CS_WAIT_STATUS_COMPLETED;
- if (timestamp)
- *timestamp = ktime_to_ns(fence->timestamp);
- } else {
- *status = CS_WAIT_STATUS_BUSY;
+ rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
+ hl_fence_put(fence);
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
+/*
+ * hl_wait_multi_cs_completion_init - init completion structure
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
+ * master QID to wait on
+ *
+ * @return valid completion struct pointer on success, otherwise error pointer
+ *
+ * Up to MULTI_CS_MAX_USER_CTX calls can be made to the driver concurrently.
+ * The function grabs the first available completion (by marking it "used")
+ * and initializes its values.
+ */
+static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
+ struct hl_device *hdev,
+ u8 stream_master_bitmap)
+{
+ struct multi_cs_completion *mcs_compl;
+ int i;
+
+ /* find free multi_cs completion structure */
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ mcs_compl = &hdev->multi_cs_completion[i];
+ spin_lock(&mcs_compl->lock);
+ if (!mcs_compl->used) {
+ mcs_compl->used = 1;
+ mcs_compl->timestamp = 0;
+ mcs_compl->stream_master_qid_map = stream_master_bitmap;
+ reinit_completion(&mcs_compl->completion);
+ spin_unlock(&mcs_compl->lock);
+ break;
}
+ spin_unlock(&mcs_compl->lock);
+ }
- if (fence->error == -ETIMEDOUT)
- rc = -ETIMEDOUT;
- else if (fence->error == -EIO)
- rc = -EIO;
+ if (i == MULTI_CS_MAX_USER_CTX) {
+ dev_err(hdev->dev,
+ "no available multi-CS completion structure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return mcs_compl;
+}
- hl_fence_put(fence);
- } else {
- dev_dbg(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
- seq, ctx->cs_sequence);
- *status = CS_WAIT_STATUS_GONE;
+/*
+ * hl_wait_multi_cs_completion_fini - return completion structure and set as
+ * unused
+ *
+ * @mcs_compl: pointer to the completion structure
+ */
+static void hl_wait_multi_cs_completion_fini(
+ struct multi_cs_completion *mcs_compl)
+{
+	/*
+	 * Free the completion structure; do it under the lock to stay
+	 * in sync with the thread that signals completion.
+	 */
+ spin_lock(&mcs_compl->lock);
+ mcs_compl->used = 0;
+ spin_unlock(&mcs_compl->lock);
+}
+
+/*
+ * hl_wait_multi_cs_completion - wait for first CS to complete
+ *
+ * @mcs_data: multi-CS internal data
+ *
+ * @return 0 on success, otherwise non 0 error code
+ */
+static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
+{
+ struct hl_device *hdev = mcs_data->ctx->hdev;
+ struct multi_cs_completion *mcs_compl;
+ long completion_rc;
+
+ mcs_compl = hl_wait_multi_cs_completion_init(hdev,
+ mcs_data->stream_master_qid_map);
+ if (IS_ERR(mcs_compl))
+ return PTR_ERR(mcs_compl);
+
+ completion_rc = wait_for_completion_interruptible_timeout(
+ &mcs_compl->completion,
+ usecs_to_jiffies(mcs_data->timeout_us));
+
+ /* update timestamp */
+ if (completion_rc > 0)
+ mcs_data->timestamp = mcs_compl->timestamp;
+
+ hl_wait_multi_cs_completion_fini(mcs_compl);
+
+ mcs_data->wait_status = completion_rc;
+
+ return 0;
+}
+
+/*
+ * hl_multi_cs_completion_init - init array of multi-CS completion structures
+ *
+ * @hdev: pointer to habanalabs device structure
+ */
+void hl_multi_cs_completion_init(struct hl_device *hdev)
+{
+ struct multi_cs_completion *mcs_cmpl;
+ int i;
+
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ mcs_cmpl = &hdev->multi_cs_completion[i];
+ mcs_cmpl->used = 0;
+ spin_lock_init(&mcs_cmpl->lock);
+ init_completion(&mcs_cmpl->completion);
+ }
+}
+
+/*
+ * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
+ *
+ * @hpriv: pointer to the private data of the fd
+ * @data: pointer to multi-CS wait ioctl in/out args
+ *
+ */
+static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct multi_cs_data mcs_data = {0};
+ union hl_wait_cs_args *args = data;
+ struct hl_ctx *ctx = hpriv->ctx;
+ struct hl_fence **fence_arr;
+ void __user *seq_arr;
+ u32 size_to_copy;
+ u64 *cs_seq_arr;
+ u8 seq_arr_len;
+ int rc;
+
+ if (!hdev->supports_wait_for_multi_cs) {
+ dev_err(hdev->dev, "Wait for multi CS is not supported\n");
+ return -EPERM;
+ }
+
+ seq_arr_len = args->in.seq_arr_len;
+
+ if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
+ dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
+ HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
+ return -EINVAL;
+ }
+
+ /* allocate memory for sequence array */
+ cs_seq_arr =
+ kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
+ if (!cs_seq_arr)
+ return -ENOMEM;
+
+ /* copy CS sequence array from user */
+ seq_arr = (void __user *) (uintptr_t) args->in.seq;
+ size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
+ if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
+ dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
+ rc = -EFAULT;
+ goto free_seq_arr;
+ }
+
+ /* allocate array for the fences */
+ fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
+ if (!fence_arr) {
+ rc = -ENOMEM;
+ goto free_seq_arr;
+ }
+
+ /* initialize the multi-CS internal data */
+ mcs_data.ctx = ctx;
+ mcs_data.seq_arr = cs_seq_arr;
+ mcs_data.fence_arr = fence_arr;
+ mcs_data.arr_len = seq_arr_len;
+
+ hl_ctx_get(hdev, ctx);
+
+ /* poll all CS fences, extract timestamp */
+ mcs_data.update_ts = true;
+ rc = hl_cs_poll_fences(&mcs_data);
+ /*
+ * skip wait for CS completion when one of the below is true:
+ * - an error on the poll function
+ * - one or more CS in the list completed
+ * - the user called ioctl with timeout 0
+ */
+ if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
+ goto put_ctx;
+
+ /* wait (with timeout) for the first CS to be completed */
+ mcs_data.timeout_us = args->in.timeout_us;
+ rc = hl_wait_multi_cs_completion(&mcs_data);
+ if (rc)
+ goto put_ctx;
+
+ if (mcs_data.wait_status > 0) {
+ /*
+ * poll fences once again to update the CS map.
+ * no timestamp should be updated this time.
+ */
+ mcs_data.update_ts = false;
+ rc = hl_cs_poll_fences(&mcs_data);
+
+ /*
+ * if hl_wait_multi_cs_completion returned before timeout (i.e.
+ * it got a completion) we expect to see at least one CS
+ * completed after the poll function.
+ */
+ if (!mcs_data.completion_bitmap) {
+ dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
+ rc = -EFAULT;
+ }
}
+put_ctx:
hl_ctx_put(ctx);
+ kfree(fence_arr);
- return rc;
+free_seq_arr:
+ kfree(cs_seq_arr);
+
+ /* update output args */
+ memset(args, 0, sizeof(*args));
+ if (rc)
+ return rc;
+
+ if (mcs_data.completion_bitmap) {
+ args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
+ args->out.cs_completion_map = mcs_data.completion_bitmap;
+
+		/* a non-zero timestamp is valid */
+ if (mcs_data.timestamp) {
+ args->out.timestamp_nsec = mcs_data.timestamp;
+ args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
+ }
+
+ /* update if some CS was gone */
+		if (mcs_data.gone_cs)
+ args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
+ } else if (mcs_data.wait_status == -ERESTARTSYS) {
+ args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
+ } else {
+ args->out.status = HL_WAIT_CS_STATUS_BUSY;
+ }
+
+ return 0;
}
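
To complement the kernel side above, a hypothetical userspace sketch of the multi-CS wait ioctl. The fields and flags are the ones this patch reads (in.seq, in.seq_arr_len, in.timeout_us, HL_WAIT_CS_FLAGS_MULTI_CS); the header path is an assumption.

    /* Hypothetical userspace usage sketch; error handling trimmed. */
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h> /* assumed installed uAPI header path */

    /* Wait up to timeout_us for the first of n CSs to complete. On
     * success, *done_map holds one bit per completed CS, by array index.
     */
    static int wait_multi_cs(int fd, const uint64_t *seqs, uint8_t n,
                             uint64_t timeout_us, uint64_t *done_map)
    {
        union hl_wait_cs_args args;

        memset(&args, 0, sizeof(args));
        args.in.seq = (uint64_t)(uintptr_t)seqs;
        args.in.seq_arr_len = n; /* up to HL_WAIT_MULTI_CS_LIST_MAX_LEN */
        args.in.timeout_us = timeout_us;
        args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;

        if (ioctl(fd, HL_IOCTL_WAIT_CS, &args))
            return -1;

        *done_map = (args.out.status == HL_WAIT_CS_STATUS_COMPLETED) ?
                args.out.cs_completion_map : 0;
        return 0;
    }
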
static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
@@ -2015,9 +2715,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
{
struct hl_user_pending_interrupt *pend;
struct hl_user_interrupt *interrupt;
- unsigned long timeout;
- long completion_rc;
+ unsigned long timeout, flags;
u32 completion_value;
+ long completion_rc;
int rc = 0;
if (timeout_us == U32_MAX)
@@ -2040,17 +2740,10 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
else
interrupt = &hdev->user_interrupt[interrupt_offset];
- spin_lock(&interrupt->wait_list_lock);
- if (!hl_device_operational(hdev, NULL)) {
- rc = -EPERM;
- goto unlock_and_free_fence;
- }
-
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
- dev_err(hdev->dev,
- "Failed to copy completion value from user\n");
+ dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
- goto unlock_and_free_fence;
+ goto free_fence;
}
if (completion_value >= target_value)
@@ -2059,48 +2752,57 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
*status = CS_WAIT_STATUS_BUSY;
if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
- goto unlock_and_free_fence;
+ goto free_fence;
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock(&interrupt->wait_list_lock);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
wait_again:
/* Wait for interrupt handler to signal completion */
- completion_rc =
- wait_for_completion_interruptible_timeout(
- &pend->fence.completion, timeout);
+ completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
+ timeout);
/* If timeout did not expire we need to perform the comparison.
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
- if (copy_from_user(&completion_value,
- u64_to_user_ptr(user_address), 4)) {
- dev_err(hdev->dev,
- "Failed to copy completion value from user\n");
+ if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
+ dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
+
goto remove_pending_user_interrupt;
}
if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
} else {
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ reinit_completion(&pend->fence.completion);
timeout = completion_rc;
+
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
goto wait_again;
}
+ } else if (completion_rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for interrupt ID %d\n",
+ interrupt->interrupt_id);
+ *status = HL_WAIT_CS_STATUS_INTERRUPTED;
+ rc = -EINTR;
} else {
*status = CS_WAIT_STATUS_BUSY;
}
remove_pending_user_interrupt:
- spin_lock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_del(&pend->wait_list_node);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
-unlock_and_free_fence:
- spin_unlock(&interrupt->wait_list_lock);
+free_fence:
kfree(pend);
hl_ctx_put(ctx);
@@ -2148,8 +2850,9 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
memset(args, 0, sizeof(*args));
if (rc) {
- dev_err_ratelimited(hdev->dev,
- "interrupt_wait_ioctl failed (%d)\n", rc);
+ if (rc != -EINTR)
+ dev_err_ratelimited(hdev->dev,
+ "interrupt_wait_ioctl failed (%d)\n", rc);
return rc;
}
@@ -2173,8 +2876,16 @@ int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
u32 flags = args->in.flags;
int rc;
+ /* If the device is not operational, no point in waiting for any command submission or
+ * user interrupt
+ */
+ if (!hl_device_operational(hpriv->hdev, NULL))
+ return -EPERM;
+
if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
rc = hl_interrupt_wait_ioctl(hpriv, data);
+ else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
+ rc = hl_multi_cs_wait_ioctl(hpriv, data);
else
rc = hl_cs_wait_ioctl(hpriv, data);
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index 19b6b045219e..22978303ad63 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -9,16 +9,70 @@
#include <linux/slab.h>
+void hl_encaps_handle_do_release(struct kref *ref)
+{
+ struct hl_cs_encaps_sig_handle *handle =
+ container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
+ struct hl_ctx *ctx = handle->hdev->compute_ctx;
+ struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ idr_remove(&mgr->handles, handle->id);
+ spin_unlock(&mgr->lock);
+
+ kfree(handle);
+}
+
+static void hl_encaps_handle_do_release_sob(struct kref *ref)
+{
+ struct hl_cs_encaps_sig_handle *handle =
+ container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
+ struct hl_ctx *ctx = handle->hdev->compute_ctx;
+ struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+
+	/* If we're here, a signals reservation was made but a CS with
+	 * encaps signals was never submitted, so put the hw_sob
+	 * refcount taken at reservation time.
+	 */
+ hw_sob_put(handle->hw_sob);
+
+ spin_lock(&mgr->lock);
+ idr_remove(&mgr->handles, handle->id);
+ spin_unlock(&mgr->lock);
+
+ kfree(handle);
+}
+
+static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
+{
+ spin_lock_init(&mgr->lock);
+ idr_init(&mgr->handles);
+}
+
+static void hl_encaps_sig_mgr_fini(struct hl_device *hdev,
+ struct hl_encaps_signals_mgr *mgr)
+{
+ struct hl_cs_encaps_sig_handle *handle;
+ struct idr *idp;
+ u32 id;
+
+ idp = &mgr->handles;
+
+ if (!idr_is_empty(idp)) {
+ dev_warn(hdev->dev, "device released while some encaps signals handles are still allocated\n");
+ idr_for_each_entry(idp, handle, id)
+ kref_put(&handle->refcount,
+ hl_encaps_handle_do_release_sob);
+ }
+
+ idr_destroy(&mgr->handles);
+}
+
static void hl_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
int i;
- /* Release all allocated pending cb's, those cb's were never
- * scheduled so it is safe to release them here
- */
- hl_pending_cb_list_flush(ctx);
-
/* Release all allocated HW block mapped list entries and destroy
* the mutex.
*/
@@ -53,6 +107,7 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
hl_cb_va_pool_fini(ctx);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
+ hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
/* Scrub both SRAM and DRAM */
hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
@@ -130,9 +185,6 @@ void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
return;
-
- dev_warn(hdev->dev,
- "user process released device but its command submissions are still executing\n");
}
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
@@ -144,11 +196,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
kref_init(&ctx->refcount);
ctx->cs_sequence = 1;
- INIT_LIST_HEAD(&ctx->pending_cb_list);
- spin_lock_init(&ctx->pending_cb_lock);
spin_lock_init(&ctx->cs_lock);
atomic_set(&ctx->thread_ctx_switch_token, 1);
- atomic_set(&ctx->thread_pending_cb_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
sizeof(struct hl_fence *),
@@ -200,6 +249,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
goto err_cb_va_pool_fini;
}
+ hl_encaps_sig_mgr_init(&ctx->sig_mgr);
+
dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
}
@@ -229,25 +280,40 @@ int hl_ctx_put(struct hl_ctx *ctx)
return kref_put(&ctx->refcount, hl_ctx_do_release);
}
-struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
+/*
+ * hl_ctx_get_fence_locked - get CS fence under CS lock
+ *
+ * @ctx: pointer to the context structure.
+ * @seq: CS sequence number
+ *
+ * @return valid fence pointer on success, NULL if fence is gone, otherwise
+ * error pointer.
+ *
+ * NOTE: this function shall be called with cs_lock locked
+ */
+static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
struct hl_fence *fence;
- spin_lock(&ctx->cs_lock);
-
- if (seq >= ctx->cs_sequence) {
- spin_unlock(&ctx->cs_lock);
+ if (seq >= ctx->cs_sequence)
return ERR_PTR(-EINVAL);
- }
- if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
- spin_unlock(&ctx->cs_lock);
+ if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
return NULL;
- }
fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
hl_fence_get(fence);
+ return fence;
+}
+
+struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
+{
+ struct hl_fence *fence;
+
+ spin_lock(&ctx->cs_lock);
+
+ fence = hl_ctx_get_fence_locked(ctx, seq);
spin_unlock(&ctx->cs_lock);
@@ -255,6 +321,46 @@ struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
}
/*
+ * hl_ctx_get_fences - get multiple CS fences under the same CS lock
+ *
+ * @ctx: pointer to the context structure.
+ * @seq_arr: array of CS sequences to wait for
+ * @fence: fence array to store the CS fences
+ * @arr_len: length of seq_arr and fence_arr
+ *
+ * @return 0 on success, otherwise non 0 error code
+ */
+int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
+ struct hl_fence **fence, u32 arr_len)
+{
+ struct hl_fence **fence_arr_base = fence;
+ int i, rc = 0;
+
+ spin_lock(&ctx->cs_lock);
+
+ for (i = 0; i < arr_len; i++, fence++) {
+ u64 seq = seq_arr[i];
+
+ *fence = hl_ctx_get_fence_locked(ctx, seq);
+
+ if (IS_ERR(*fence)) {
+ dev_err(ctx->hdev->dev,
+ "Failed to get fence for CS with seq 0x%llx\n",
+ seq);
+ rc = PTR_ERR(*fence);
+ break;
+ }
+ }
+
+ spin_unlock(&ctx->cs_lock);
+
+ if (rc)
+ hl_fences_put(fence_arr_base, i);
+
+ return rc;
+}
+
+/*
* hl_ctx_mgr_init - initialize the context manager
*
* @mgr: pointer to context manager structure
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 703d79fb6f3f..985f1f3dbd20 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -209,12 +209,12 @@ static int userptr_show(struct seq_file *s, void *data)
if (first) {
first = false;
seq_puts(s, "\n");
- seq_puts(s, " user virtual address size dma dir\n");
+ seq_puts(s, " pid user virtual address size dma dir\n");
seq_puts(s, "----------------------------------------------------------\n");
}
- seq_printf(s,
- " 0x%-14llx %-10u %-30s\n",
- userptr->addr, userptr->size, dma_dir[userptr->dir]);
+ seq_printf(s, " %-7d 0x%-14llx %-10llu %-30s\n",
+ userptr->pid, userptr->addr, userptr->size,
+ dma_dir[userptr->dir]);
}
spin_unlock(&dev_entry->userptr_spinlock);
@@ -235,7 +235,7 @@ static int vm_show(struct seq_file *s, void *data)
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
bool once = true;
u64 j;
int i;
@@ -261,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
if (*vm_type == VM_TYPE_USERPTR) {
userptr = hnode->ptr;
seq_printf(s,
- " 0x%-14llx %-10u\n",
+ " 0x%-14llx %-10llu\n",
hnode->vaddr, userptr->size);
} else {
phys_pg_pack = hnode->ptr;
@@ -320,6 +320,77 @@ static int vm_show(struct seq_file *s, void *data)
return 0;
}
+static int userptr_lookup_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct scatterlist *sg;
+ struct hl_userptr *userptr;
+ bool first = true;
+ u64 total_npages, npages, sg_start, sg_end;
+ dma_addr_t dma_addr;
+ int i;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+
+ list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
+ if (dev_entry->userptr_lookup >= userptr->addr &&
+ dev_entry->userptr_lookup < userptr->addr + userptr->size) {
+ total_npages = 0;
+ for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents,
+ i) {
+ npages = hl_get_sg_info(sg, &dma_addr);
+ sg_start = userptr->addr +
+ total_npages * PAGE_SIZE;
+ sg_end = userptr->addr +
+ (total_npages + npages) * PAGE_SIZE;
+
+ if (dev_entry->userptr_lookup >= sg_start &&
+ dev_entry->userptr_lookup < sg_end) {
+ dma_addr += (dev_entry->userptr_lookup -
+ sg_start);
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " user virtual address dma address pid region start region size\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------\n");
+ }
+ seq_printf(s, " 0x%-18llx 0x%-16llx %-8u 0x%-16llx %-12llu\n",
+ dev_entry->userptr_lookup,
+ (u64)dma_addr, userptr->pid,
+ userptr->addr, userptr->size);
+ }
+ total_npages += npages;
+ }
+ }
+ }
+
+ spin_unlock(&dev_entry->userptr_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ ssize_t rc;
+ u64 value;
+
+ rc = kstrtoull_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ dev_entry->userptr_lookup = value;
+
+ return count;
+}
+
static int mmu_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
@@ -349,7 +420,7 @@ static int mmu_show(struct seq_file *s, void *data)
return 0;
}
- phys_addr = hops_info.hop_info[hops_info.used_hops - 1].hop_pte_val;
+ hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);
if (hops_info.scrambled_vaddr &&
(dev_entry->mmu_addr != hops_info.scrambled_vaddr))
@@ -491,11 +562,10 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_ctx *ctx = hdev->compute_ctx;
struct hl_vm_hash_node *hnode;
+ u64 end_address, range_size;
struct hl_userptr *userptr;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
bool valid = false;
- u64 end_address;
- u32 range_size;
int i, rc = 0;
if (!ctx) {
@@ -1043,6 +1113,60 @@ static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
return 0;
}
+static ssize_t hl_state_dump_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ ssize_t rc;
+
+ down_read(&entry->state_dump_sem);
+ if (!entry->state_dump[entry->state_dump_head])
+ rc = 0;
+ else
+ rc = simple_read_from_buffer(
+ buf, count, ppos,
+ entry->state_dump[entry->state_dump_head],
+ strlen(entry->state_dump[entry->state_dump_head]));
+ up_read(&entry->state_dump_sem);
+
+ return rc;
+}
+
+static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ ssize_t rc;
+ u32 size;
+ int i;
+
+ rc = kstrtouint_from_user(buf, count, 10, &size);
+ if (rc)
+ return rc;
+
+ if (size <= 0 || size >= ARRAY_SIZE(entry->state_dump)) {
+ dev_err(hdev->dev, "Invalid number of dumps to skip\n");
+ return -EINVAL;
+ }
+
+ if (entry->state_dump[entry->state_dump_head]) {
+ down_write(&entry->state_dump_sem);
+ for (i = 0; i < size; ++i) {
+ vfree(entry->state_dump[entry->state_dump_head]);
+ entry->state_dump[entry->state_dump_head] = NULL;
+ if (entry->state_dump_head > 0)
+ entry->state_dump_head--;
+ else
+ entry->state_dump_head =
+ ARRAY_SIZE(entry->state_dump) - 1;
+ }
+ up_write(&entry->state_dump_sem);
+ }
+
+ return count;
+}
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -1110,12 +1234,19 @@ static const struct file_operations hl_security_violations_fops = {
.read = hl_security_violations_read
};
+static const struct file_operations hl_state_dump_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_state_dump_read,
+ .write = hl_state_dump_write
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
{"command_submission_jobs", command_submission_jobs_show, NULL},
{"userptr", userptr_show, NULL},
{"vm", vm_show, NULL},
+ {"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
{"mmu", mmu_show, mmu_asid_va_write},
{"engines", engines_show, NULL}
};
@@ -1172,6 +1303,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
INIT_LIST_HEAD(&dev_entry->userptr_list);
INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
mutex_init(&dev_entry->file_mutex);
+ init_rwsem(&dev_entry->state_dump_sem);
spin_lock_init(&dev_entry->cb_spinlock);
spin_lock_init(&dev_entry->cs_spinlock);
spin_lock_init(&dev_entry->cs_job_spinlock);
@@ -1283,6 +1415,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry->root,
&hdev->skip_reset_on_timeout);
+ debugfs_create_file("state_dump",
+ 0600,
+ dev_entry->root,
+ dev_entry,
+ &hl_state_dump_fops);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
@@ -1297,6 +1435,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
void hl_debugfs_remove_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
+ int i;
debugfs_remove_recursive(entry->root);
@@ -1304,6 +1443,9 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
vfree(entry->blob_desc.data);
+ for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
+ vfree(entry->state_dump[i]);
+
kfree(entry->entry_arr);
}
@@ -1416,6 +1558,28 @@ void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}
+/**
+ * hl_debugfs_set_state_dump - register state dump making it accessible via
+ * debugfs
+ * @hdev: pointer to the device structure
+ * @data: the actual dump data
+ * @length: the length of the data
+ */
+void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
+ unsigned long length)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ down_write(&dev_entry->state_dump_sem);
+
+ dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
+ ARRAY_SIZE(dev_entry->state_dump);
+ vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
+ dev_entry->state_dump[dev_entry->state_dump_head] = data;
+
+ up_write(&dev_entry->state_dump_sem);
+}
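
The state_dump node registered here can be exercised from userspace along these lines; a sketch assuming a first device instance named "hl0" under the debugfs root created below.

    /* Hypothetical userspace sketch; the "hl0" instance name is assumed. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4096];
        ssize_t n;
        int fd;

        fd = open("/sys/kernel/debug/habanalabs/hl0/state_dump", O_RDWR);
        if (fd < 0)
            return 1;

        /* read the most recent dump in the history ring */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, n, stdout);

        /* writing a decimal count discards that many dumps from the
         * head of the ring, exposing older ones on the next read
         */
        write(fd, "1", 1);
        close(fd);
        return 0;
    }
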
+
void __init hl_debugfs_init(void)
{
hl_debug_root = debugfs_create_dir("habanalabs", NULL);
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index ff4cbde289c0..97c7c86580e6 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -7,11 +7,11 @@
#define pr_fmt(fmt) "habanalabs: " fmt
+#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
-#include <uapi/misc/habanalabs.h>
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
@@ -23,6 +23,8 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
status = HL_DEVICE_STATUS_NEEDS_RESET;
else if (hdev->disabled)
status = HL_DEVICE_STATUS_MALFUNCTION;
+ else if (!hdev->init_done)
+ status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
else
status = HL_DEVICE_STATUS_OPERATIONAL;
@@ -44,6 +46,7 @@ bool hl_device_operational(struct hl_device *hdev,
case HL_DEVICE_STATUS_NEEDS_RESET:
return false;
case HL_DEVICE_STATUS_OPERATIONAL:
+ case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
default:
return true;
}
@@ -129,8 +132,8 @@ static int hl_device_release(struct inode *inode, struct file *filp)
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
if (!hl_hpriv_put(hpriv))
- dev_warn(hdev->dev,
- "Device is still in use because there are live CS and/or memory mappings\n");
+ dev_notice(hdev->dev,
+ "User process closed FD but device still in use\n");
hdev->last_open_session_duration_jif =
jiffies - hdev->last_successful_open_jif;
@@ -308,9 +311,15 @@ static void device_hard_reset_pending(struct work_struct *work)
container_of(work, struct hl_device_reset_work,
reset_work.work);
struct hl_device *hdev = device_reset_work->hdev;
+ u32 flags;
int rc;
- rc = hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD);
+ flags = HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD;
+
+ if (device_reset_work->fw_reset)
+ flags |= HL_RESET_FW;
+
+ rc = hl_device_reset(hdev, flags);
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
dev_info(hdev->dev,
"Could not reset device. will try again in %u seconds",
@@ -682,6 +691,44 @@ out:
return rc;
}
+static void take_release_locks(struct hl_device *hdev)
+{
+	/* Flush anyone that is inside the critical section of enqueuing
+	 * jobs to the H/W
+	 */
+ hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ /* Flush processes that are sending message to CPU */
+ mutex_lock(&hdev->send_cpu_message_lock);
+ mutex_unlock(&hdev->send_cpu_message_lock);
+
+ /* Flush anyone that is inside device open */
+ mutex_lock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_list_lock);
+}
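
take_release_locks() uses each lock/unlock pair as a flush barrier rather than for protection: once a state flag that gates entry has been set, taking and releasing the lock guarantees earlier entrants have left the critical section. A minimal sketch of the idiom, with an assumed 'stopping' flag standing in for hdev->disabled:

    /* Illustrative sketch of the flush-by-lock idiom, not driver code. */
    #include <linux/mutex.h>
    #include <linux/types.h>

    static bool stopping;
    static DEFINE_MUTEX(flush_lock);

    static void stop_and_flush(void)
    {
        stopping = true; /* new entrants check this before locking */

        /* empty critical section: after this lock/unlock pair, anyone
         * who entered before 'stopping' was set has already left
         */
        mutex_lock(&flush_lock);
        mutex_unlock(&flush_lock);
    }
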
+
+static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+{
+ if (hard_reset)
+ device_late_fini(hdev);
+
+ /*
+ * Halt the engines and disable interrupts so we won't get any more
+ * completions from H/W and we won't have any accesses from the
+ * H/W to the host machine
+ */
+ hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);
+
+ /* Go over all the queues, release all CS and their jobs */
+ hl_cs_rollback_all(hdev);
+
+ /* Release all pending user interrupts, each pending user interrupt
+ * holds a reference to user context
+ */
+ hl_release_pending_user_interrupts(hdev);
+}
+
/*
* hl_device_suspend - initiate device suspend
*
@@ -707,16 +754,7 @@ int hl_device_suspend(struct hl_device *hdev)
/* This blocks all other stuff that is not blocked by in_reset */
hdev->disabled = true;
- /*
- * Flush anyone that is inside the critical section of enqueue
- * jobs to the H/W
- */
- hdev->asic_funcs->hw_queues_lock(hdev);
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
- /* Flush processes that are sending message to CPU */
- mutex_lock(&hdev->send_cpu_message_lock);
- mutex_unlock(&hdev->send_cpu_message_lock);
+ take_release_locks(hdev);
rc = hdev->asic_funcs->suspend(hdev);
if (rc)
@@ -819,6 +857,11 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
usleep_range(1000, 10000);
put_task_struct(task);
+ } else {
+ dev_warn(hdev->dev,
+ "Can't get task struct for PID so giving up on killing process\n");
+ mutex_unlock(&hdev->fpriv_list_lock);
+ return -ETIME;
}
}
@@ -885,7 +928,7 @@ static void device_disable_open_processes(struct hl_device *hdev)
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
- bool hard_reset, from_hard_reset_thread, hard_instead_soft = false;
+ bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false;
int i, rc;
if (!hdev->init_done) {
@@ -894,8 +937,9 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
return 0;
}
- hard_reset = (flags & HL_RESET_HARD) != 0;
- from_hard_reset_thread = (flags & HL_RESET_FROM_RESET_THREAD) != 0;
+ hard_reset = !!(flags & HL_RESET_HARD);
+ from_hard_reset_thread = !!(flags & HL_RESET_FROM_RESET_THREAD);
+ fw_reset = !!(flags & HL_RESET_FW);
if (!hard_reset && !hdev->supports_soft_reset) {
hard_instead_soft = true;
@@ -947,11 +991,13 @@ do_reset:
else
hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
- /*
- * if reset is due to heartbeat, device CPU is no responsive in
- * which case no point sending PCI disable message to it
+	/* If reset is due to heartbeat, the device CPU is not responsive,
+	 * in which case there is no point sending it a PCI disable message.
+	 *
+	 * If the F/W is performing the reset, there is no need to send it
+	 * a message to disable PCI access.
+	 */
- if (hard_reset && !(flags & HL_RESET_HEARTBEAT)) {
+ if (hard_reset && !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
		/* Disable PCI access from device F/W so it won't send
* us additional interrupts. We disable MSI/MSI-X at
* the halt_engines function and we can't have the F/W
@@ -970,15 +1016,7 @@ do_reset:
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
- /* Flush anyone that is inside the critical section of enqueue
- * jobs to the H/W
- */
- hdev->asic_funcs->hw_queues_lock(hdev);
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
- /* Flush anyone that is inside device open */
- mutex_lock(&hdev->fpriv_list_lock);
- mutex_unlock(&hdev->fpriv_list_lock);
+ take_release_locks(hdev);
dev_err(hdev->dev, "Going to RESET device!\n");
}
@@ -989,6 +1027,8 @@ again:
hdev->process_kill_trial_cnt = 0;
+ hdev->device_reset_work.fw_reset = fw_reset;
+
/*
* Because the reset function can't run from heartbeat work,
* we need to call the reset function from a dedicated work.
@@ -999,31 +1039,7 @@ again:
return 0;
}
- if (hard_reset) {
- device_late_fini(hdev);
-
- /*
- * Now that the heartbeat thread is closed, flush processes
- * which are sending messages to CPU
- */
- mutex_lock(&hdev->send_cpu_message_lock);
- mutex_unlock(&hdev->send_cpu_message_lock);
- }
-
- /*
- * Halt the engines and disable interrupts so we won't get any more
- * completions from H/W and we won't have any accesses from the
- * H/W to the host machine
- */
- hdev->asic_funcs->halt_engines(hdev, hard_reset);
-
- /* Go over all the queues, release all CS and their jobs */
- hl_cs_rollback_all(hdev);
-
- /* Release all pending user interrupts, each pending user interrupt
- * holds a reference to user context
- */
- hl_release_pending_user_interrupts(hdev);
+ cleanup_resources(hdev, hard_reset, fw_reset);
kill_processes:
if (hard_reset) {
@@ -1057,12 +1073,15 @@ kill_processes:
}
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, hard_reset);
+ hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
if (hard_reset) {
+ hdev->fw_loader.linux_loaded = false;
+
/* Release kernel context */
if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
hdev->kernel_ctx = NULL;
+
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
hl_eq_reset(hdev, &hdev->event_queue);
@@ -1292,6 +1311,10 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto user_interrupts_fini;
+
+ /* initialize completion structure for multi CS wait */
+ hl_multi_cs_completion_init(hdev);
+
/*
* Initialize the H/W queues. Must be done before hw_init, because
* there the addresses of the kernel queue are being written to the
@@ -1361,6 +1384,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->compute_ctx = NULL;
+ hdev->asic_funcs->state_dump_init(hdev);
+
hl_debugfs_add_device(hdev);
/* debugfs nodes are created in hl_ctx_init so it must be called after
@@ -1567,31 +1592,13 @@ void hl_device_fini(struct hl_device *hdev)
/* Mark device as disabled */
hdev->disabled = true;
- /* Flush anyone that is inside the critical section of enqueue
- * jobs to the H/W
- */
- hdev->asic_funcs->hw_queues_lock(hdev);
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
- /* Flush anyone that is inside device open */
- mutex_lock(&hdev->fpriv_list_lock);
- mutex_unlock(&hdev->fpriv_list_lock);
+ take_release_locks(hdev);
hdev->hard_reset_pending = true;
hl_hwmon_fini(hdev);
- device_late_fini(hdev);
-
- /*
- * Halt the engines and disable interrupts so we won't get any more
- * completions from H/W and we won't have any accesses from the
- * H/W to the host machine
- */
- hdev->asic_funcs->halt_engines(hdev, true);
-
- /* Go over all the queues, release all CS and their jobs */
- hl_cs_rollback_all(hdev);
+ cleanup_resources(hdev, true, false);
/* Kill processes here after CS rollback. This is because the process
* can't really exit until all its CSs are done, which is what we
@@ -1610,7 +1617,9 @@ void hl_device_fini(struct hl_device *hdev)
hl_cb_pool_fini(hdev);
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+
+ hdev->fw_loader.linux_loaded = false;
/* Release kernel context */
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 2e4d04ec6b53..8d2568c63f19 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -240,11 +240,15 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
/* set fence to a non valid value */
pkt->fence = cpu_to_le32(UINT_MAX);
- rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
- if (rc) {
- dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
- goto out;
- }
+ /*
+ * The CPU queue is a synchronous queue with an effective depth of
+ * a single entry (although it is allocated with room for multiple
+	 * entries). We lock on it using 'send_cpu_message_lock', which
+	 * serializes accesses to the CPU queue. This means we don't need
+	 * to lock the entire H/W queues module when submitting a JOB to
+	 * the CPU queue.
+	 */
+ hl_hw_queue_submit_bd(hdev, queue, 0, len, pkt_dma_addr);
if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
expected_ack_val = queue->pi;
@@ -663,17 +667,15 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
hdev->event_queue.check_eqe_index = false;
/* Read FW application security bits again */
- if (hdev->asic_prop.fw_cpu_boot_dev_sts0_valid) {
- hdev->asic_prop.fw_app_cpu_boot_dev_sts0 =
- RREG32(sts_boot_dev_sts0_reg);
- if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ if (prop->fw_cpu_boot_dev_sts0_valid) {
+ prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);
+ if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
hdev->event_queue.check_eqe_index = true;
}
- if (hdev->asic_prop.fw_cpu_boot_dev_sts1_valid)
- hdev->asic_prop.fw_app_cpu_boot_dev_sts1 =
- RREG32(sts_boot_dev_sts1_reg);
+ if (prop->fw_cpu_boot_dev_sts1_valid)
+ prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
out:
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
@@ -1008,6 +1010,11 @@ void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
} else {
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
msleep(static_loader->cpu_reset_wait_msec);
+
+ /* Must clear this register in order to prevent preboot
+ * from reading WFE after reboot
+ */
+ WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
}
hdev->device_cpu_is_halted = true;
@@ -1055,6 +1062,10 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
dev_err(hdev->dev,
"Device boot progress - Thermal Sensor initialization failed\n");
break;
+ case CPU_BOOT_STATUS_SECURITY_READY:
+ dev_err(hdev->dev,
+ "Device boot progress - Stuck in preboot after security initialization\n");
+ break;
default:
dev_err(hdev->dev,
"Device boot progress - Invalid status code %d\n",
@@ -1238,11 +1249,6 @@ static void hl_fw_preboot_update_state(struct hl_device *hdev)
* b. Check whether hard reset is done by boot cpu
* 3. FW application - a. Fetch fw application security status
* b. Check whether hard reset is done by fw app
- *
- * Preboot:
- * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED). If set, then-
- * check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
- * If set, then mark GIC controller to be disabled.
*/
prop->hard_reset_done_by_fw =
!!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
@@ -1953,8 +1959,8 @@ static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
if (!hdev->asic_prop.gic_interrupts_enable &&
!(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
- dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_irq_ctrl;
- dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_irq_ctrl;
+ dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
+ dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
dev_warn(hdev->dev,
"Using a single interrupt interface towards cpucp");
@@ -2122,8 +2128,7 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
/* Read FW application security bits */
if (prop->fw_cpu_boot_dev_sts0_valid) {
- prop->fw_app_cpu_boot_dev_sts0 =
- RREG32(cpu_boot_dev_sts0_reg);
+ prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
@@ -2143,8 +2148,7 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
}
if (prop->fw_cpu_boot_dev_sts1_valid) {
- prop->fw_app_cpu_boot_dev_sts1 =
- RREG32(cpu_boot_dev_sts1_reg);
+ prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);
dev_dbg(hdev->dev,
"Firmware application CPU status1 %#x\n",
@@ -2235,6 +2239,10 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
dev_info(hdev->dev,
"Loading firmware to device, may take some time...\n");
+ /*
+ * At this stage, "cpu_dyn_regs" contains only LKD's hard-coded values.
+ * It will be updated from FW after hl_fw_dynamic_request_descriptor().
+ */
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 6b3cdd7e068a..bebebcb163ee 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -20,6 +20,7 @@
#include <linux/scatterlist.h>
#include <linux/hashtable.h>
#include <linux/debugfs.h>
+#include <linux/rwsem.h>
#include <linux/bitfield.h>
#include <linux/genalloc.h>
#include <linux/sched/signal.h>
@@ -65,6 +66,11 @@
#define HL_COMMON_USER_INTERRUPT_ID 0xFFF
+#define HL_STATE_DUMP_HIST_LEN 5
+
+#define OBJ_NAMES_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+#define SYNC_TO_ENGINE_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+
/* Memory */
#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
@@ -122,12 +128,17 @@ enum hl_mmu_page_table_location {
*
* - HL_RESET_DEVICE_RELEASE
* Set if reset is due to device release
+ *
+ * - HL_RESET_FW
+ * F/W will perform the reset. No need to ask it to reset the device. This is relevant
+ * only when running with secured f/w
*/
#define HL_RESET_HARD (1 << 0)
#define HL_RESET_FROM_RESET_THREAD (1 << 1)
#define HL_RESET_HEARTBEAT (1 << 2)
#define HL_RESET_TDR (1 << 3)
#define HL_RESET_DEVICE_RELEASE (1 << 4)
+#define HL_RESET_FW (1 << 5)
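As a hedged illustration (not part of this patch), the new bit would typically be OR'ed into the flags mask passed to hl_device_reset():

        /* Illustration only: request a hard reset that the secured F/W
         * performs by itself, so the driver skips the reset message.
         */
        rc = hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW);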
#define HL_MAX_SOBS_PER_MONITOR 8
@@ -236,7 +247,9 @@ enum hl_cs_type {
CS_TYPE_DEFAULT,
CS_TYPE_SIGNAL,
CS_TYPE_WAIT,
- CS_TYPE_COLLECTIVE_WAIT
+ CS_TYPE_COLLECTIVE_WAIT,
+ CS_RESERVE_SIGNALS,
+ CS_UNRESERVE_SIGNALS
};
/*
@@ -281,13 +294,17 @@ enum queue_cb_alloc_flags {
* @hdev: habanalabs device structure.
* @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
* @sob_id: id of this SOB.
+ * @sob_addr: the sob offset from the base address.
* @q_idx: the H/W queue that uses this SOB.
+ * @need_reset: reset indication set when switching to the other sob.
*/
struct hl_hw_sob {
struct hl_device *hdev;
struct kref kref;
u32 sob_id;
+ u32 sob_addr;
u32 q_idx;
+ bool need_reset;
};
enum hl_collective_mode {
@@ -317,11 +334,11 @@ struct hw_queue_properties {
};
/**
- * enum vm_type_t - virtual memory mapping request information.
+ * enum vm_type - virtual memory mapping request information.
* @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
* @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
*/
-enum vm_type_t {
+enum vm_type {
VM_TYPE_USERPTR = 0x1,
VM_TYPE_PHYS_PACK = 0x2
};
@@ -382,6 +399,16 @@ struct hl_mmu_properties {
};
/**
+ * struct hl_hints_range - hint addresses reserved va range.
+ * @start_addr: start address of the va range.
+ * @end_addr: end address of the va range.
+ */
+struct hl_hints_range {
+ u64 start_addr;
+ u64 end_addr;
+};
+
+/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
* @cpucp_info: received various information from CPU-CP regarding the H/W, e.g.
@@ -392,6 +419,10 @@ struct hl_mmu_properties {
* @pmmu: PCI (host) MMU address translation properties.
* @pmmu_huge: PCI (host) MMU address translation properties for memory
* allocated with huge pages.
+ * @hints_dram_reserved_va_range: dram hint addresses reserved range.
+ * @hints_host_reserved_va_range: host hint addresses reserved range.
+ * @hints_host_hpage_reserved_va_range: host huge page hint addresses reserved
+ * range.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address - SRAM physical start address for user access.
@@ -412,6 +443,10 @@ struct hl_mmu_properties {
* to the device's MMU.
* @cb_va_end_addr: virtual end address of command buffers which are mapped to
* the device's MMU.
+ * @dram_hints_align_mask: dram va hint addresses alignment mask which is used
+ * for hints validity check.
+ * @device_dma_offset_for_host_access: the offset to add to host DMA addresses
+ * to enable the device to access them.
* @mmu_pgt_size: MMU page tables total size.
* @mmu_pte_size: PTE size in MMU page tables.
* @mmu_hop_table_size: MMU hop table size.
@@ -459,6 +494,8 @@ struct hl_mmu_properties {
* reserved for the user
* @first_available_cq: first available CQ for the user.
* @user_interrupt_count: number of user interrupts.
+ * @server_type: Server type that the ASIC is currently installed in.
+ * The value is according to enum hl_server_type in the uapi file.
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
* @fw_security_enabled: true if security measures are enabled in firmware,
@@ -470,6 +507,7 @@ struct hl_mmu_properties {
* @dram_supports_virtual_memory: is there an MMU towards the DRAM
* @hard_reset_done_by_fw: true if firmware is handling hard reset flow
* @num_functional_hbms: number of functional HBMs in each DCORE.
+ * @hints_range_reservation: true if the device supports hint address range reservation.
* @iatu_done_by_fw: true if iATU configuration is being done by FW.
* @dynamic_fw_load: true if dynamic FW load is supported.
* @gic_interrupts_enable: true if FW is not blocking GIC controller,
@@ -483,6 +521,9 @@ struct asic_fixed_properties {
struct hl_mmu_properties dmmu;
struct hl_mmu_properties pmmu;
struct hl_mmu_properties pmmu_huge;
+ struct hl_hints_range hints_dram_reserved_va_range;
+ struct hl_hints_range hints_host_reserved_va_range;
+ struct hl_hints_range hints_host_hpage_reserved_va_range;
u64 sram_base_address;
u64 sram_end_address;
u64 sram_user_base_address;
@@ -500,6 +541,8 @@ struct asic_fixed_properties {
u64 mmu_dram_default_page_addr;
u64 cb_va_start_addr;
u64 cb_va_end_addr;
+ u64 dram_hints_align_mask;
+ u64 device_dma_offset_for_host_access;
u32 mmu_pgt_size;
u32 mmu_pte_size;
u32 mmu_hop_table_size;
@@ -534,6 +577,7 @@ struct asic_fixed_properties {
u16 first_available_user_msix_interrupt;
u16 first_available_cq[HL_MAX_DCORES];
u16 user_interrupt_count;
+ u16 server_type;
u8 tpc_enabled_mask;
u8 completion_queues_count;
u8 fw_security_enabled;
@@ -542,6 +586,7 @@ struct asic_fixed_properties {
u8 dram_supports_virtual_memory;
u8 hard_reset_done_by_fw;
u8 num_functional_hbms;
+ u8 hints_range_reservation;
u8 iatu_done_by_fw;
u8 dynamic_fw_load;
u8 gic_interrupts_enable;
@@ -552,40 +597,45 @@ struct asic_fixed_properties {
* @completion: fence is implemented using completion
* @refcount: refcount for this fence
* @cs_sequence: sequence of the corresponding command submission
+ * @stream_master_qid_map: bitmap of the stream-master QIDs that the
+ * multi-CS is waiting on
* @error: mark this fence with error
* @timestamp: timestamp upon completion
- *
*/
struct hl_fence {
struct completion completion;
struct kref refcount;
u64 cs_sequence;
+ u32 stream_master_qid_map;
int error;
ktime_t timestamp;
};
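Hedged illustration of the assumed bitmap convention; the index variable here is hypothetical, and bit i is taken to correspond to the i-th entry of hdev->stream_master_qid_arr:

        fence->stream_master_qid_map |= BIT(stream_master_index);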
/**
* struct hl_cs_compl - command submission completion object.
- * @sob_reset_work: workqueue object to run SOB reset flow.
* @base_fence: hl fence object.
* @lock: spinlock to protect fence.
* @hdev: habanalabs device structure.
* @hw_sob: the H/W SOB used in this signal/wait CS.
+ * @encaps_sig_hdl: encaps signals handler.
* @cs_seq: command submission sequence number.
* @type: type of the CS - signal/wait.
* @sob_val: the SOB value that is used in this signal/wait CS.
* @sob_group: the SOB group that is used in this collective wait CS.
+ * @encaps_signals: true if this is a completion object of a CS with
+ * encapsulated signals.
*/
struct hl_cs_compl {
- struct work_struct sob_reset_work;
struct hl_fence base_fence;
spinlock_t lock;
struct hl_device *hdev;
struct hl_hw_sob *hw_sob;
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
u64 cs_seq;
enum hl_cs_type type;
u16 sob_val;
u16 sob_group;
+ bool encaps_signals;
};
/*
@@ -698,6 +748,17 @@ struct hl_sync_stream_properties {
};
/**
+ * struct hl_encaps_signals_mgr - describes sync stream encapsulated signals
+ * handlers manager
+ * @lock: protects handles.
+ * @handles: an idr to hold all encapsulated signals handles.
+ */
+struct hl_encaps_signals_mgr {
+ spinlock_t lock;
+ struct idr handles;
+};
+
+/**
* struct hl_hw_queue - describes a H/W transport queue.
* @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
* @sync_stream_prop: sync stream queue properties
@@ -875,7 +936,7 @@ struct pci_mem_region {
u64 region_base;
u64 region_size;
u64 bar_size;
- u32 offset_in_bar;
+ u64 offset_in_bar;
u8 bar_id;
u8 used;
};
@@ -996,7 +1057,7 @@ struct fw_load_mgr {
* hw_fini and before CS rollback.
* @suspend: handles IP specific H/W or SW changes for suspend.
* @resume: handles IP specific H/W or SW changes for resume.
- * @cb_mmap: maps a CB.
+ * @mmap: maps a memory region into user space.
* @ring_doorbell: increment PI on a given QMAN.
* @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
* function because the PQs are located in different memory areas
@@ -1101,6 +1162,10 @@ struct fw_load_mgr {
* generic f/w compatible PLL Indexes
* @init_firmware_loader: initialize data for FW loader.
* @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling
+ * @state_dump_init: initialize constants required for state dump
+ * @get_sob_addr: get SOB base address offset.
+ * @set_pci_memory_regions: set properties of PCI memory regions
+ * @get_stream_master_qid_arr: get pointer to stream masters QID array
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1110,11 +1175,11 @@ struct hl_asic_funcs {
int (*sw_init)(struct hl_device *hdev);
int (*sw_fini)(struct hl_device *hdev);
int (*hw_init)(struct hl_device *hdev);
- void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
- void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
+ void (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
+ void (*halt_engines)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
int (*suspend)(struct hl_device *hdev);
int (*resume)(struct hl_device *hdev);
- int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
+ int (*mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
@@ -1210,10 +1275,11 @@ struct hl_asic_funcs {
void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
void (*set_dma_mask_from_fw)(struct hl_device *hdev);
u64 (*get_device_time)(struct hl_device *hdev);
- void (*collective_wait_init_cs)(struct hl_cs *cs);
+ int (*collective_wait_init_cs)(struct hl_cs *cs);
int (*collective_wait_create_jobs)(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id);
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ u32 wait_queue_id, u32 collective_engine_id,
+ u32 encaps_signal_offset);
u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
void (*ack_protection_bits_errors)(struct hl_device *hdev);
@@ -1226,6 +1292,10 @@ struct hl_asic_funcs {
int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
void (*init_firmware_loader)(struct hl_device *hdev);
void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
+ void (*state_dump_init)(struct hl_device *hdev);
+ u32 (*get_sob_addr)(struct hl_device *hdev, u32 sob_id);
+ void (*set_pci_memory_regions)(struct hl_device *hdev);
+ u32* (*get_stream_master_qid_arr)(void);
};
@@ -1283,20 +1353,6 @@ struct hl_cs_counters_atomic {
};
/**
- * struct hl_pending_cb - pending command buffer structure
- * @cb_node: cb node in pending cb list
- * @cb: command buffer to send in next submission
- * @cb_size: command buffer size
- * @hw_queue_id: destination queue id
- */
-struct hl_pending_cb {
- struct list_head cb_node;
- struct hl_cb *cb;
- u32 cb_size;
- u32 hw_queue_id;
-};
-
-/**
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
@@ -1312,28 +1368,21 @@ struct hl_pending_cb {
* MMU hash or walking the PGT requires talking this lock.
* @hw_block_list_lock: protects the HW block memory list.
* @debugfs_list: node in debugfs list of contexts.
- * pending_cb_list: list of pending command buffers waiting to be sent upon
- * next user command submission context.
* @hw_block_mem_list: list of HW block virtual mapped addresses.
* @cs_counters: context command submission counters.
* @cb_va_pool: device VA pool for command buffers which are mapped to the
* device's MMU.
+ * @sig_mgr: encaps signals handle manager.
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to user so user could inquire about CS. It is used as
* index to cs_pending array.
* @dram_default_hops: array that holds all hops addresses needed for default
* DRAM mapping.
- * @pending_cb_lock: spinlock to protect pending cb list
* @cs_lock: spinlock to protect cs_sequence.
* @dram_phys_mem: amount of used physical DRAM memory by this context.
* @thread_ctx_switch_token: token to prevent multiple threads of the same
* context from running the context switch phase.
* Only a single thread should run it.
- * @thread_pending_cb_token: token to prevent multiple threads from processing
- * the pending CB list. Only a single thread should
- * process the list since it is protected by a
- * spinlock and we don't want to halt the entire
- * command submission sequence.
* @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
* the context switch phase from moving to their
* execution phase before the context switch phase
@@ -1353,17 +1402,15 @@ struct hl_ctx {
struct mutex mmu_lock;
struct mutex hw_block_list_lock;
struct list_head debugfs_list;
- struct list_head pending_cb_list;
struct list_head hw_block_mem_list;
struct hl_cs_counters_atomic cs_counters;
struct gen_pool *cb_va_pool;
+ struct hl_encaps_signals_mgr sig_mgr;
u64 cs_sequence;
u64 *dram_default_hops;
- spinlock_t pending_cb_lock;
spinlock_t cs_lock;
atomic64_t dram_phys_mem;
atomic_t thread_ctx_switch_token;
- atomic_t thread_pending_cb_token;
u32 thread_ctx_switch_wait_token;
u32 asid;
u32 handle;
@@ -1394,20 +1441,22 @@ struct hl_ctx_mgr {
* @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions.
+ * @pid: the pid of the user process owning the memory
* @addr: user-space virtual address of the start of the memory area.
* @size: size of the memory area to pin & map.
* @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
*/
struct hl_userptr {
- enum vm_type_t vm_type; /* must be first */
+ enum vm_type vm_type; /* must be first */
struct list_head job_node;
struct page **pages;
unsigned int npages;
struct sg_table *sgt;
enum dma_data_direction dir;
struct list_head debugfs_list;
+ pid_t pid;
u64 addr;
- u32 size;
+ u64 size;
u8 dma_mapped;
};
@@ -1426,12 +1475,14 @@ struct hl_userptr {
* @mirror_node : node in device mirror list of command submissions.
* @staged_cs_node: node in the staged cs list.
* @debugfs_list: node in debugfs list of command submissions.
+ * @encaps_sig_hdl: holds the encaps signals handle.
* @sequence: the sequence number of this CS.
* @staged_sequence: the sequence of the staged submission this CS is part of,
* relevant only if staged_cs is set.
* @timeout_jiffies: cs timeout in jiffies.
* @submission_time_jiffies: submission time of the cs
* @type: CS_TYPE_*.
+ * @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
* @timedout : true if CS was timedout.
@@ -1445,6 +1496,7 @@ struct hl_userptr {
* @staged_cs: true if this CS is part of a staged submission.
* @skip_reset_on_timeout: true if we shall not reset the device in case
* timeout occurs (debug scenario).
+ * @encaps_signals: true if this CS has encaps reserved signals.
*/
struct hl_cs {
u16 *jobs_in_queue_cnt;
@@ -1459,11 +1511,13 @@ struct hl_cs {
struct list_head mirror_node;
struct list_head staged_cs_node;
struct list_head debugfs_list;
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
u64 sequence;
u64 staged_sequence;
u64 timeout_jiffies;
u64 submission_time_jiffies;
enum hl_cs_type type;
+ u32 encaps_sig_hdl_id;
u8 submitted;
u8 completed;
u8 timedout;
@@ -1474,6 +1528,7 @@ struct hl_cs {
u8 staged_first;
u8 staged_cs;
u8 skip_reset_on_timeout;
+ u8 encaps_signals;
};
/**
@@ -1493,6 +1548,8 @@ struct hl_cs {
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @job_cb_size: the actual size of the CB that we put on the queue.
+ * @encaps_sig_wait_offset: encapsulated signals offset, which allow user
+ * to wait on part of the reserved signals.
* @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
* handle to a kernel-allocated CB object, false
* otherwise (SRAM/DRAM/host address).
@@ -1517,6 +1574,7 @@ struct hl_cs_job {
u32 hw_queue_id;
u32 user_cb_size;
u32 job_cb_size;
+ u32 encaps_sig_wait_offset;
u8 is_kernel_allocated_cb;
u8 contains_dma_pkt;
};
@@ -1613,7 +1671,7 @@ struct hl_vm_hw_block_list_node {
* @created_from_userptr: is product of host virtual address.
*/
struct hl_vm_phys_pg_pack {
- enum vm_type_t vm_type; /* must be first */
+ enum vm_type vm_type; /* must be first */
u64 *pages;
u64 npages;
u64 total_size;
@@ -1759,9 +1817,13 @@ struct hl_debugfs_entry {
* @ctx_mem_hash_list: list of available contexts with MMU mappings.
* @ctx_mem_hash_spinlock: protects cb_list.
* @blob_desc: descriptor of blob
+ * @state_dump: data of the system states in case of a bad cs.
+ * @state_dump_sem: protects state_dump.
* @addr: next address to read/write from/to in read/write32.
* @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @userptr_lookup: the target user ptr to look up on demand.
* @mmu_asid: ASID to use while translating in mmu_show.
+ * @state_dump_head: index of the latest state dump
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
@@ -1783,14 +1845,149 @@ struct hl_dbg_device_entry {
struct list_head ctx_mem_hash_list;
spinlock_t ctx_mem_hash_spinlock;
struct debugfs_blob_wrapper blob_desc;
+ char *state_dump[HL_STATE_DUMP_HIST_LEN];
+ struct rw_semaphore state_dump_sem;
u64 addr;
u64 mmu_addr;
+ u64 userptr_lookup;
u32 mmu_asid;
+ u32 state_dump_head;
u8 i2c_bus;
u8 i2c_addr;
u8 i2c_reg;
};
+/**
+ * struct hl_hw_obj_name_entry - single hw object name, member of
+ * hl_state_dump_specs
+ * @node: link to the containing hash table
+ * @name: hw object name
+ * @id: object identifier
+ */
+struct hl_hw_obj_name_entry {
+ struct hlist_node node;
+ const char *name;
+ u32 id;
+};
+
+enum hl_state_dump_specs_props {
+ SP_SYNC_OBJ_BASE_ADDR,
+ SP_NEXT_SYNC_OBJ_ADDR,
+ SP_SYNC_OBJ_AMOUNT,
+ SP_MON_OBJ_WR_ADDR_LOW,
+ SP_MON_OBJ_WR_ADDR_HIGH,
+ SP_MON_OBJ_WR_DATA,
+ SP_MON_OBJ_ARM_DATA,
+ SP_MON_OBJ_STATUS,
+ SP_MONITORS_AMOUNT,
+ SP_TPC0_CMDQ,
+ SP_TPC0_CFG_SO,
+ SP_NEXT_TPC,
+ SP_MME_CMDQ,
+ SP_MME_CFG_SO,
+ SP_NEXT_MME,
+ SP_DMA_CMDQ,
+ SP_DMA_CFG_SO,
+ SP_DMA_QUEUES_OFFSET,
+ SP_NUM_OF_MME_ENGINES,
+ SP_SUB_MME_ENG_NUM,
+ SP_NUM_OF_DMA_ENGINES,
+ SP_NUM_OF_TPC_ENGINES,
+ SP_ENGINE_NUM_OF_QUEUES,
+ SP_ENGINE_NUM_OF_STREAMS,
+ SP_ENGINE_NUM_OF_FENCES,
+ SP_FENCE0_CNT_OFFSET,
+ SP_FENCE0_RDATA_OFFSET,
+ SP_CP_STS_OFFSET,
+ SP_NUM_CORES,
+
+ SP_MAX
+};
+
+enum hl_sync_engine_type {
+ ENGINE_TPC,
+ ENGINE_DMA,
+ ENGINE_MME,
+};
+
+/**
+ * struct hl_mon_state_dump - represents a state dump of a single monitor
+ * @id: monitor id
+ * @wr_addr_low: address monitor will write to, low bits
+ * @wr_addr_high: address monitor will write to, high bits
+ * @wr_data: data monitor will write
+ * @arm_data: register value containing monitor configuration
+ * @status: monitor status
+ */
+struct hl_mon_state_dump {
+ u32 id;
+ u32 wr_addr_low;
+ u32 wr_addr_high;
+ u32 wr_data;
+ u32 arm_data;
+ u32 status;
+};
+
+/**
+ * struct hl_sync_to_engine_map_entry - sync object id to engine mapping entry
+ * @engine_type: type of the engine
+ * @engine_id: id of the engine
+ * @sync_id: id of the sync object
+ */
+struct hl_sync_to_engine_map_entry {
+ struct hlist_node node;
+ enum hl_sync_engine_type engine_type;
+ u32 engine_id;
+ u32 sync_id;
+};
+
+/**
+ * struct hl_sync_to_engine_map - maps sync object id to associated engine id
+ * @tb: hash table containing the mapping, each element is of type
+ * struct hl_sync_to_engine_map_entry
+ */
+struct hl_sync_to_engine_map {
+ DECLARE_HASHTABLE(tb, SYNC_TO_ENGINE_HASH_TABLE_BITS);
+};
+
+/**
+ * struct hl_state_dump_specs_funcs - virtual functions used by the state dump
+ * @gen_sync_to_engine_map: generate a hash map from sync obj id to its engine
+ * @print_single_monitor: format monitor data as string
+ * @monitor_valid: return true if given monitor dump is valid
+ * @print_fences_single_engine: format fences data as string
+ */
+struct hl_state_dump_specs_funcs {
+ int (*gen_sync_to_engine_map)(struct hl_device *hdev,
+ struct hl_sync_to_engine_map *map);
+ int (*print_single_monitor)(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev,
+ struct hl_mon_state_dump *mon);
+ int (*monitor_valid)(struct hl_mon_state_dump *mon);
+ int (*print_fences_single_engine)(struct hl_device *hdev,
+ u64 base_offset,
+ u64 status_base_offset,
+ enum hl_sync_engine_type engine_type,
+ u32 engine_id, char **buf,
+ size_t *size, size_t *offset);
+};
+
+/**
+ * struct hl_state_dump_specs - defines ASIC known hw objects names
+ * @so_id_to_str_tb: sync objects names index table
+ * @monitor_id_to_str_tb: monitors names index table
+ * @funcs: virtual functions used for state dump
+ * @sync_namager_names: readable names for sync manager if available (ex: N_E)
+ * @props: pointer to a per asic const props array required for state dump
+ */
+struct hl_state_dump_specs {
+ DECLARE_HASHTABLE(so_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
+ DECLARE_HASHTABLE(monitor_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
+ struct hl_state_dump_specs_funcs funcs;
+ const char * const *sync_namager_names;
+ s64 *props;
+};
+
/*
* DEVICES
@@ -1798,7 +1995,7 @@ struct hl_dbg_device_entry {
#define HL_STR_MAX 32
-#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_NEEDS_RESET + 1)
+#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
* x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
@@ -1946,11 +2143,13 @@ struct hwmon_chip_info;
* @wq: work queue for device reset procedure.
* @reset_work: reset work to be done.
* @hdev: habanalabs device structure.
+ * @fw_reset: whether the F/W will do the reset on its own, without the driver sending it a message to do so.
*/
struct hl_device_reset_work {
struct workqueue_struct *wq;
struct delayed_work reset_work;
struct hl_device *hdev;
+ bool fw_reset;
};
/**
@@ -2065,6 +2264,58 @@ struct hl_mmu_funcs {
};
/**
+ * number of user contexts allowed to call wait_for_multi_cs ioctl in
+ * parallel
+ */
+#define MULTI_CS_MAX_USER_CTX 2
+
+/**
+ * struct multi_cs_completion - multi CS wait completion.
+ * @completion: completion of any of the CS in the list
+ * @lock: spinlock for the completion structure
+ * @timestamp: timestamp for the multi-CS completion
+ * @stream_master_qid_map: bitmap of all stream masters on which the multi-CS
+ * is waiting
+ * @used: 1 if in use, otherwise 0
+ */
+struct multi_cs_completion {
+ struct completion completion;
+ spinlock_t lock;
+ s64 timestamp;
+ u32 stream_master_qid_map;
+ u8 used;
+};
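A hedged sketch (hypothetical helper, not in this patch) of how a waiter could claim one of the MULTI_CS_MAX_USER_CTX completion slots:

        static struct multi_cs_completion *pick_mcs_slot(struct hl_device *hdev)
        {
                struct multi_cs_completion *mcs;
                int i;

                for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
                        mcs = &hdev->multi_cs_completion[i];

                        spin_lock(&mcs->lock);
                        if (!mcs->used) {
                                mcs->used = 1;
                                spin_unlock(&mcs->lock);
                                return mcs;
                        }
                        spin_unlock(&mcs->lock);
                }

                return NULL; /* all slots are busy */
        }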
+
+/**
+ * struct multi_cs_data - internal data for multi CS call
+ * @ctx: pointer to the context structure
+ * @fence_arr: array of fences of all CSs
+ * @seq_arr: array of CS sequence numbers
+ * @timeout_us: timeout in usec for waiting for CS to complete
+ * @timestamp: timestamp of first completed CS
+ * @wait_status: wait for CS status
+ * @completion_bitmap: bitmap of completed CSs (1- completed, otherwise 0)
+ * @stream_master_qid_map: bitmap of all stream master QIDs on which the
+ * multi-CS is waiting
+ * @arr_len: fence_arr and seq_arr array length
+ * @gone_cs: indication of gone CS (1- there was gone CS, otherwise 0)
+ * @update_ts: update timestamp. 1- update the timestamp, otherwise 0.
+ */
+struct multi_cs_data {
+ struct hl_ctx *ctx;
+ struct hl_fence **fence_arr;
+ u64 *seq_arr;
+ s64 timeout_us;
+ s64 timestamp;
+ long wait_status;
+ u32 completion_bitmap;
+ u32 stream_master_qid_map;
+ u8 arr_len;
+ u8 gone_cs;
+ u8 update_ts;
+};
+
+/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
* @pcie_bar_phys: array of available PCIe bars physical addresses.
@@ -2129,6 +2380,8 @@ struct hl_mmu_funcs {
* @mmu_func: device-related MMU functions.
* @fw_loader: FW loader manager.
* @pci_mem_region: array of memory regions in the PCI
+ * @state_dump_specs: constants and dictionaries needed to dump system state.
+ * @multi_cs_completion: array of multi-CS completion.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -2205,6 +2458,7 @@ struct hl_mmu_funcs {
* halted. We can't halt it again because the COMMS
* protocol will throw an error. Relevant only for
* cases where Linux was not loaded to device CPU
+ * @supports_wait_for_multi_cs: true if wait for multi CS is supported
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2273,6 +2527,11 @@ struct hl_device {
struct pci_mem_region pci_mem_region[PCI_REGION_NUMBER];
+ struct hl_state_dump_specs state_dump_specs;
+
+ struct multi_cs_completion multi_cs_completion[
+ MULTI_CS_MAX_USER_CTX];
+ u32 *stream_master_qid_arr;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
@@ -2322,6 +2581,8 @@ struct hl_device {
u8 curr_reset_cause;
u8 skip_reset_on_timeout;
u8 device_cpu_is_halted;
+ u8 supports_wait_for_multi_cs;
+ u8 stream_master_qid_arr_size;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2343,6 +2604,29 @@ struct hl_device {
};
+/**
+ * struct hl_cs_encaps_sig_handle - encapsulated signals handle structure
+ * @refcount: refcount used to protect removing this id when several
+ * wait CSs are used to wait on the reserved encaps signals.
+ * @hdev: pointer to habanalabs device structure.
+ * @hw_sob: pointer to H/W SOB used in the reservation.
+ * @cs_seq: staged cs sequence which contains encapsulated signals
+ * @id: idr handler id to be used to fetch the handler info
+ * @q_idx: stream queue index
+ * @pre_sob_val: current SOB value before reservation
+ * @count: signals number
+ */
+struct hl_cs_encaps_sig_handle {
+ struct kref refcount;
+ struct hl_device *hdev;
+ struct hl_hw_sob *hw_sob;
+ u64 cs_seq;
+ u32 id;
+ u32 q_idx;
+ u32 pre_sob_val;
+ u32 count;
+};
+
/*
* IOCTLs
*/
@@ -2373,6 +2657,23 @@ struct hl_ioctl_desc {
*/
/**
+ * hl_get_sg_info() - get number of pages and the DMA address from SG list.
+ * @sg: the SG list.
+ * @dma_addr: pointer to DMA address to return.
+ *
+ * Calculate the number of consecutive pages described by the SG list. Take the
+ * offset of the address in the first page, add to it the length and round it up
+ * to the number of needed pages.
+ */
+static inline u32 hl_get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
+{
+ *dma_addr = sg_dma_address(sg);
+
+ return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+}
+
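A worked example of the rounding arithmetic, assuming 4 KiB pages: a segment whose DMA address is 0x10000800 (offset 0x800 into its page) with an sg_dma_len() of 0x1800 bytes gives (0x800 + 0x1800 + 0xFFF) >> 12 = 2, matching the two pages the segment actually touches:

        dma_addr_t dma_addr;
        u32 npages = hl_get_sg_info(sg, &dma_addr); /* 2 in the case above */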
+/**
* hl_mem_area_inside_range() - Checks whether address+size are inside a range.
* @address: The start address of the area we want to validate.
* @size: The size in bytes of the area we want to validate.
@@ -2436,7 +2737,9 @@ void destroy_hdev(struct hl_device *hdev);
int hl_hw_queues_create(struct hl_device *hdev);
void hl_hw_queues_destroy(struct hl_device *hdev);
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
- u32 cb_size, u64 cb_ptr);
+ u32 cb_size, u64 cb_ptr);
+void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 ctl, u32 len, u64 ptr);
int hl_hw_queue_schedule_cs(struct hl_cs *cs);
u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
@@ -2470,6 +2773,8 @@ void hl_ctx_do_release(struct kref *ref);
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
+int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
+ struct hl_fence **fence, u32 arr_len);
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
@@ -2511,18 +2816,19 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx);
void hl_cb_va_pool_fini(struct hl_ctx *ctx);
void hl_cs_rollback_all(struct hl_device *hdev);
-void hl_pending_cb_list_flush(struct hl_ctx *ctx);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void hl_sob_reset_error(struct kref *ref);
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
void hl_fence_put(struct hl_fence *fence);
+void hl_fences_put(struct hl_fence **fence, int len);
void hl_fence_get(struct hl_fence *fence);
void cs_get(struct hl_cs *cs);
bool cs_needs_completion(struct hl_cs *cs);
bool cs_needs_timeout(struct hl_cs *cs);
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
+void hl_multi_cs_completion_init(struct hl_device *hdev);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
@@ -2650,9 +2956,25 @@ int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
+void hw_sob_get(struct hl_hw_sob *hw_sob);
+void hw_sob_put(struct hl_hw_sob *hw_sob);
+void hl_encaps_handle_do_release(struct kref *ref);
+void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
+ struct hl_cs *cs, struct hl_cs_job *job,
+ struct hl_cs_compl *cs_cmpl);
void hl_release_pending_user_interrupts(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
- struct hl_hw_sob **hw_sob, u32 count);
+ struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);
+
+int hl_state_dump(struct hl_device *hdev);
+const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id);
+const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
+ struct hl_mon_state_dump *mon);
+void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map);
+__printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
+ const char *format, ...);
+char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
+const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
#ifdef CONFIG_DEBUG_FS
@@ -2673,6 +2995,8 @@ void hl_debugfs_remove_userptr(struct hl_device *hdev,
struct hl_userptr *userptr);
void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
+void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
+ unsigned long length);
#else
@@ -2746,6 +3070,11 @@ static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
{
}
+static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
+ char *data, unsigned long length)
+{
+}
+
#endif
/* IOCTLs */
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 4194cda2d04c..a75e4fceb9d8 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -141,7 +141,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_cb_mgr_init(&hpriv->cb_mgr);
hl_ctx_mgr_init(&hpriv->ctx_mgr);
- hpriv->taskpid = find_get_pid(current->pid);
+ hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
mutex_lock(&hdev->fpriv_list_lock);
@@ -194,7 +194,6 @@ int hl_device_open(struct inode *inode, struct file *filp)
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
-
hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
@@ -318,12 +317,16 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->asic_prop.fw_security_enabled = false;
/* Assign status description string */
- strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
- "disabled", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL],
+ "operational", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET],
"in reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
+ "disabled", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET],
"needs reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
+ "in device creation", HL_STR_MAX);
hdev->major = hl_major;
hdev->reset_on_lockup = reset_on_lockup;
@@ -532,7 +535,7 @@ hl_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state)
result = PCI_ERS_RESULT_NONE;
}
- hdev->asic_funcs->halt_engines(hdev, true);
+ hdev->asic_funcs->halt_engines(hdev, true, false);
return result;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index f4dda7b4acdd..86c3257d9ae1 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -94,6 +94,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.first_available_interrupt_id =
prop->first_available_user_msix_interrupt;
+ hw_ip.server_type = prop->server_type;
+
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index bcabfdbf1e01..76b7de8f1406 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -65,7 +65,7 @@ void hl_hw_queue_update_ci(struct hl_cs *cs)
}
/*
- * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
+ * hl_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
* H/W queue.
* @hdev: pointer to habanalabs device structure
* @q: pointer to habanalabs queue structure
@@ -80,8 +80,8 @@ void hl_hw_queue_update_ci(struct hl_cs *cs)
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
- struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
+void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
@@ -222,8 +222,8 @@ static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
* @cb_size: size of CB
* @cb_ptr: pointer to CB location
*
- * This function sends a single CB, that must NOT generate a completion entry
- *
+ * This function sends a single CB that must NOT generate a completion entry.
+ * Sending CPU messages can instead be done via 'hl_hw_queue_submit_bd()'.
*/
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
u32 cb_size, u64 cb_ptr)
@@ -231,16 +231,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
int rc = 0;
- /*
- * The CPU queue is a synchronous queue with an effective depth of
- * a single entry (although it is allocated with room for multiple
- * entries). Therefore, there is a different lock, called
- * send_cpu_message_lock, that serializes accesses to the CPU queue.
- * As a result, we don't need to lock the access to the entire H/W
- * queues module when submitting a JOB to the CPU queue
- */
- if (q->queue_type != QUEUE_TYPE_CPU)
- hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_lock(hdev);
if (hdev->disabled) {
rc = -EPERM;
@@ -258,11 +249,10 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
goto out;
}
- ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
+ hl_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
out:
- if (q->queue_type != QUEUE_TYPE_CPU)
- hdev->asic_funcs->hw_queues_unlock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
return rc;
}
@@ -328,7 +318,7 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
cq->pi = hl_cq_inc_ptr(cq->pi);
submit_bd:
- ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+ hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
@@ -407,7 +397,7 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
else
ptr = (u64) (uintptr_t) job->user_cb;
- ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+ hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
static int init_signal_cs(struct hl_device *hdev,
@@ -426,8 +416,9 @@ static int init_signal_cs(struct hl_device *hdev,
cs_cmpl->sob_val = prop->next_sob_val;
dev_dbg(hdev->dev,
- "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
- cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+ "generate signal CB, sob_id: %d, sob val: %u, q_idx: %d, seq: %llu\n",
+ cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx,
+ cs_cmpl->cs_seq);
/* we set an EB since we must make sure all operations are done
* when sending the signal
@@ -435,17 +426,37 @@ static int init_signal_cs(struct hl_device *hdev,
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
cs_cmpl->hw_sob->sob_id, 0, true);
- rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1);
+ rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
+ false);
return rc;
}
-static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
+void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
+ struct hl_cs *cs, struct hl_cs_job *job,
+ struct hl_cs_compl *cs_cmpl)
+{
+ struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
+
+ cs_cmpl->hw_sob = handle->hw_sob;
+
+ /* Note that encaps_sig_wait_offset was validated earlier in the flow
+ * against the max reserved signal count.
+ * We always decrement the offset by 1: when the user sets offset 1 he
+ * means to wait only for the first signal, whose value is pre_sob_val;
+ * offset 2 requires (pre_sob_val + 1), and so on.
+ */
+ cs_cmpl->sob_val = handle->pre_sob_val +
+ (job->encaps_sig_wait_offset - 1);
+}
+
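A worked numeric example of the off-by-one described above, using hypothetical values of pre_sob_val = 10 and 4 reserved signals: offset 1 waits for SOB value 10 (the first signal only), offset 2 for 11, and offset 4 for 13:

        /* Hypothetical values, for illustration only */
        handle->pre_sob_val = 10;
        job->encaps_sig_wait_offset = 2;
        /* cs_cmpl->sob_val = 10 + (2 - 1) = 11 */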
+static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
- struct hl_cs_compl *signal_cs_cmpl;
- struct hl_sync_stream_properties *prop;
struct hl_gen_wait_properties wait_prop;
+ struct hl_sync_stream_properties *prop;
+ struct hl_cs_compl *signal_cs_cmpl;
u32 q_idx;
q_idx = job->hw_queue_id;
@@ -455,14 +466,51 @@ static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
struct hl_cs_compl,
base_fence);
- /* copy the SOB id and value of the signal CS */
- cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
- cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ if (cs->encaps_signals) {
+ /* use the encaps signal handle stored earlier in the flow
+ * and set the SOB information from the encaps
+ * signals handle
+ */
+ hl_hw_queue_encaps_sig_set_sob_info(hdev, cs, job, cs_cmpl);
+
+ dev_dbg(hdev->dev, "Wait for encaps signals handle, qidx(%u), CS sequence(%llu), sob val: 0x%x, offset: %u\n",
+ cs->encaps_sig_hdl->q_idx,
+ cs->encaps_sig_hdl->cs_seq,
+ cs_cmpl->sob_val,
+ job->encaps_sig_wait_offset);
+ } else {
+ /* Copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ }
+
+ /* Check again whether the signal CS has already completed.
+ * If it has, don't send any wait CS, since the hw_sob could
+ * already be in reset. If it hasn't, take a refcount on the
+ * hw_sob to prevent the SOB from being reset before the wait
+ * CS is submitted.
+ * Note that this check is protected by two locks, the hw queue
+ * lock and the completion object lock; the latter also protects
+ * the hw_sob reset handler function.
+ * The hw queue lock prevents the hw_sob refcount, which is
+ * changed by the signal/wait flows, from going out of sync.
+ */
+ spin_lock(&signal_cs_cmpl->lock);
+
+ if (completion_done(&cs->signal_fence->completion)) {
+ spin_unlock(&signal_cs_cmpl->lock);
+ return -EINVAL;
+ }
+
+ kref_get(&cs_cmpl->hw_sob->kref);
+
+ spin_unlock(&signal_cs_cmpl->lock);
dev_dbg(hdev->dev,
- "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
+ "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d, seq: %llu\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
- prop->base_mon_id, q_idx);
+ prop->base_mon_id, q_idx, cs->sequence);
wait_prop.data = (void *) job->patched_cb;
wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
@@ -471,17 +519,14 @@ static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
wait_prop.mon_id = prop->base_mon_id;
wait_prop.q_idx = q_idx;
wait_prop.size = 0;
+
hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);
- kref_get(&cs_cmpl->hw_sob->kref);
- /*
- * Must put the signal fence after the SOB refcnt increment so
- * the SOB refcnt won't turn 0 and reset the SOB before the
- * wait CS was submitted.
- */
mb();
hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
+
+ return 0;
}
/*
@@ -506,7 +551,60 @@ static int init_signal_wait_cs(struct hl_cs *cs)
if (cs->type & CS_TYPE_SIGNAL)
rc = init_signal_cs(hdev, job, cs_cmpl);
else if (cs->type & CS_TYPE_WAIT)
- init_wait_cs(hdev, cs, job, cs_cmpl);
+ rc = init_wait_cs(hdev, cs, job, cs_cmpl);
+
+ return rc;
+}
+
+static int encaps_sig_first_staged_cs_handler
+ (struct hl_device *hdev, struct hl_cs *cs)
+{
+ struct hl_cs_compl *cs_cmpl =
+ container_of(cs->fence,
+ struct hl_cs_compl, base_fence);
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
+ struct hl_encaps_signals_mgr *mgr;
+ int rc = 0;
+
+ mgr = &hdev->compute_ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ encaps_sig_hdl = idr_find(&mgr->handles, cs->encaps_sig_hdl_id);
+ if (encaps_sig_hdl) {
+ /*
+ * Set handler CS sequence,
+ * the CS which contains the encapsulated signals.
+ */
+ encaps_sig_hdl->cs_seq = cs->sequence;
+ /* store the handle and set encaps signal indication,
+ * to be used later in cs_do_release to put the last
+ * reference to encaps signals handlers.
+ */
+ cs_cmpl->encaps_signals = true;
+ cs_cmpl->encaps_sig_hdl = encaps_sig_hdl;
+
+ /* set hw_sob pointer in completion object
+ * since it's used in cs_do_release flow to put
+ * refcount to sob
+ */
+ cs_cmpl->hw_sob = encaps_sig_hdl->hw_sob;
+ cs_cmpl->sob_val = encaps_sig_hdl->pre_sob_val +
+ encaps_sig_hdl->count;
+
+ dev_dbg(hdev->dev, "CS seq (%llu) added to encaps signal handler id (%u), count(%u), qidx(%u), sob(%u), val(%u)\n",
+ cs->sequence, encaps_sig_hdl->id,
+ encaps_sig_hdl->count,
+ encaps_sig_hdl->q_idx,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val);
+
+ } else {
+ dev_err(hdev->dev, "encaps handle id(%u) wasn't found!\n",
+ cs->encaps_sig_hdl_id);
+ rc = -EINVAL;
+ }
+
+ spin_unlock(&mgr->lock);
return rc;
}
@@ -581,14 +679,21 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) {
rc = init_signal_wait_cs(cs);
- if (rc) {
- dev_err(hdev->dev, "Failed to submit signal cs\n");
+ if (rc)
goto unroll_cq_resv;
- }
- } else if (cs->type == CS_TYPE_COLLECTIVE_WAIT)
- hdev->asic_funcs->collective_wait_init_cs(cs);
+ } else if (cs->type == CS_TYPE_COLLECTIVE_WAIT) {
+ rc = hdev->asic_funcs->collective_wait_init_cs(cs);
+ if (rc)
+ goto unroll_cq_resv;
+ }
+ if (cs->encaps_signals && cs->staged_first) {
+ rc = encaps_sig_first_staged_cs_handler(hdev, cs);
+ if (rc)
+ goto unroll_cq_resv;
+ }
+
spin_lock(&hdev->cs_mirror_lock);
/* Verify staged CS exists and add to the staged list */
@@ -613,6 +718,11 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node);
+
+ /* update stream map of the first CS */
+ if (hdev->supports_wait_for_multi_cs)
+ staged_cs->fence->stream_master_qid_map |=
+ cs->fence->stream_master_qid_map;
}
list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
@@ -834,6 +944,8 @@ static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
hw_sob = &sync_stream_prop->hw_sob[sob];
hw_sob->hdev = hdev;
hw_sob->sob_id = sync_stream_prop->base_sob_id + sob;
+ hw_sob->sob_addr =
+ hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
hw_sob->q_idx = q_idx;
kref_init(&hw_sob->kref);
}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index af339ce1ab4f..33986933aa9e 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -124,7 +124,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
spin_lock(&vm->idr_lock);
handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
- GFP_KERNEL);
+ GFP_ATOMIC);
spin_unlock(&vm->idr_lock);
if (handle < 0) {
@@ -529,6 +529,33 @@ static inline int add_va_block(struct hl_device *hdev,
}
/**
+ * is_hint_crossing_range() - check if the hint address crosses the specified
+ * reserved range.
+ */
+static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
+ u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
+ bool range_cross;
+
+ if (range_type == HL_VA_RANGE_TYPE_DRAM)
+ range_cross =
+ hl_mem_area_crosses_range(start_addr, size,
+ prop->hints_dram_reserved_va_range.start_addr,
+ prop->hints_dram_reserved_va_range.end_addr);
+ else if (range_type == HL_VA_RANGE_TYPE_HOST)
+ range_cross =
+ hl_mem_area_crosses_range(start_addr, size,
+ prop->hints_host_reserved_va_range.start_addr,
+ prop->hints_host_reserved_va_range.end_addr);
+ else
+ range_cross =
+ hl_mem_area_crosses_range(start_addr, size,
+ prop->hints_host_hpage_reserved_va_range.start_addr,
+ prop->hints_host_hpage_reserved_va_range.end_addr);
+
+ return range_cross;
+}
+
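For reference, a minimal sketch of the interval-overlap test this helper delegates to; the body shown is an assumption about hl_mem_area_crosses_range(), not copied from the tree:

        static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
                        u64 range_start_address, u64 range_end_address)
        {
                u64 end_address = address + size - 1;

                /* Two ranges overlap iff each one starts before the other ends */
                return (address <= range_end_address) &&
                        (range_start_address <= end_address);
        }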
+/**
* get_va_block() - get a virtual block for the given size and alignment.
*
* @hdev: pointer to the habanalabs device structure.
@@ -536,6 +563,8 @@ static inline int add_va_block(struct hl_device *hdev,
* @size: requested block size.
* @hint_addr: hint for requested address by the user.
* @va_block_align: required alignment of the virtual block start address.
+ * @range_type: va range type (host, dram)
+ * @flags: additional memory flags, currently only uses HL_MEM_FORCE_HINT
*
* This function does the following:
* - Iterate on the virtual block list to find a suitable virtual block for the
@@ -545,13 +574,19 @@ static inline int add_va_block(struct hl_device *hdev,
*/
static u64 get_va_block(struct hl_device *hdev,
struct hl_va_range *va_range,
- u64 size, u64 hint_addr, u32 va_block_align)
+ u64 size, u64 hint_addr, u32 va_block_align,
+ enum hl_va_range_type range_type,
+ u32 flags)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
- align_mask, reserved_valid_start = 0, reserved_valid_size = 0;
+ align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
+ dram_hint_mask = prop->dram_hints_align_mask;
bool add_prev = false;
bool is_align_pow_2 = is_power_of_2(va_range->page_size);
+ bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
+ bool force_hint = flags & HL_MEM_FORCE_HINT;
if (is_align_pow_2)
align_mask = ~((u64)va_block_align - 1);
@@ -564,12 +599,20 @@ static u64 get_va_block(struct hl_device *hdev,
size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
va_range->page_size;
- tmp_hint_addr = hint_addr;
+ tmp_hint_addr = hint_addr & ~dram_hint_mask;
/* Check if we need to ignore hint address */
if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
- (!is_align_pow_2 &&
- do_div(tmp_hint_addr, va_range->page_size))) {
+ (!is_align_pow_2 && is_hint_dram_addr &&
+ do_div(tmp_hint_addr, va_range->page_size))) {
+
+ if (force_hint) {
+ /* Hint must be respected, so here we just fail */
+ dev_err(hdev->dev,
+ "Hint address 0x%llx is not page aligned - cannot be respected\n",
+ hint_addr);
+ return 0;
+ }
dev_dbg(hdev->dev,
"Hint address 0x%llx will be ignored because it is not aligned\n",
@@ -596,6 +639,16 @@ static u64 get_va_block(struct hl_device *hdev,
if (valid_size < size)
continue;
+ /*
+ * In case the hint address is 0 and the hints_range_reservation
+ * property is enabled, avoid allocating va blocks from the
+ * range reserved for hint addresses
+ */
+ if (prop->hints_range_reservation && !hint_addr)
+ if (is_hint_crossing_range(range_type, valid_start,
+ size, prop))
+ continue;
+
/* Pick the minimal length block which has the required size */
if (!new_va_block || (valid_size < reserved_valid_size)) {
new_va_block = va_block;
@@ -618,6 +671,17 @@ static u64 get_va_block(struct hl_device *hdev,
goto out;
}
+ if (force_hint && reserved_valid_start != hint_addr) {
+ /* Hint address must be respected. If we are here - this means
+ * we could not respect it.
+ */
+ dev_err(hdev->dev,
+ "Hint address 0x%llx could not be respected\n",
+ hint_addr);
+ reserved_valid_start = 0;
+ goto out;
+ }
+
/*
* Check if there is some leftover range due to reserving the new
* va block, then return it to the main virtual addresses list.
@@ -670,7 +734,8 @@ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
enum hl_va_range_type type, u32 size, u32 alignment)
{
return get_va_block(hdev, ctx->va_range[type], size, 0,
- max(alignment, ctx->va_range[type]->page_size));
+ max(alignment, ctx->va_range[type]->page_size),
+ type, 0);
}
/**
@@ -732,28 +797,15 @@ int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
}
/**
- * get_sg_info() - get number of pages and the DMA address from SG list.
- * @sg: the SG list.
- * @dma_addr: pointer to DMA address to return.
- *
- * Calculate the number of consecutive pages described by the SG list. Take the
- * offset of the address in the first page, add to it the length and round it up
- * to the number of needed pages.
- */
-static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
-{
- *dma_addr = sg_dma_address(sg);
-
- return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
- (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-}
-
-/**
* init_phys_pg_pack_from_userptr() - initialize physical page pack from host
* memory
* @ctx: pointer to the context structure.
* @userptr: userptr to initialize from.
* @pphys_pg_pack: result pointer.
+ * @force_regular_page: tell the function to ignore huge page optimization,
+ * even if possible. Needed for cases where the device VA
+ * is allocated before we know the composition of the
+ * physical pages
*
* This function does the following:
* - Pin the physical pages related to the given virtual block.
@@ -762,17 +814,18 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_userptr *userptr,
- struct hl_vm_phys_pg_pack **pphys_pg_pack)
+ struct hl_vm_phys_pg_pack **pphys_pg_pack,
+ bool force_regular_page)
{
+ u32 npages, page_size = PAGE_SIZE,
+ huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
+ u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
struct hl_vm_phys_pg_pack *phys_pg_pack;
+ bool first = true, is_huge_page_opt;
+ u64 page_mask, total_npages;
struct scatterlist *sg;
dma_addr_t dma_addr;
- u64 page_mask, total_npages;
- u32 npages, page_size = PAGE_SIZE,
- huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
- bool first = true, is_huge_page_opt = true;
int rc, i, j;
- u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
if (!phys_pg_pack)
@@ -783,6 +836,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
phys_pg_pack->asid = ctx->asid;
atomic_set(&phys_pg_pack->mapping_cnt, 1);
+ is_huge_page_opt = (force_regular_page ? false : true);
+
/* Only if all dma_addrs are aligned to 2MB and their
* sizes is at least 2MB, we can use huge page mapping.
* We limit the 2MB optimization to this condition,
@@ -791,7 +846,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
*/
total_npages = 0;
for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
- npages = get_sg_info(sg, &dma_addr);
+ npages = hl_get_sg_info(sg, &dma_addr);
total_npages += npages;
@@ -820,7 +875,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
j = 0;
for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
- npages = get_sg_info(sg, &dma_addr);
+ npages = hl_get_sg_info(sg, &dma_addr);
/* align down to physical page size and save the offset */
if (first) {
@@ -1001,11 +1056,12 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_userptr *userptr = NULL;
struct hl_vm_hash_node *hnode;
struct hl_va_range *va_range;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
u64 ret_vaddr, hint_addr;
u32 handle = 0, va_block_align;
int rc;
bool is_userptr = args->flags & HL_MEM_USERPTR;
+ enum hl_va_range_type va_range_type = 0;
/* Assume failure */
*device_addr = 0;
@@ -1023,7 +1079,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
}
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+ &phys_pg_pack, false);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1031,14 +1087,14 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto init_page_pack_err;
}
- vm_type = (enum vm_type_t *) userptr;
+ vm_type = (enum vm_type *) userptr;
hint_addr = args->map_host.hint_addr;
handle = phys_pg_pack->handle;
/* get required alignment */
if (phys_pg_pack->page_size == page_size) {
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
-
+ va_range_type = HL_VA_RANGE_TYPE_HOST;
/*
* huge page alignment may be needed in case of regular
* page mapping, depending on the host VA alignment
@@ -1053,6 +1109,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
* mapping
*/
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
+ va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
va_block_align = huge_page_size;
}
} else {
@@ -1072,12 +1129,13 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
spin_unlock(&vm->idr_lock);
- vm_type = (enum vm_type_t *) phys_pg_pack;
+ vm_type = (enum vm_type *) phys_pg_pack;
hint_addr = args->map_device.hint_addr;
/* DRAM VA alignment is the same as the MMU page size */
va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
+ va_range_type = HL_VA_RANGE_TYPE_DRAM;
va_block_align = hdev->asic_prop.dmmu.page_size;
}
@@ -1100,8 +1158,23 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto hnode_err;
}
+ if (hint_addr && phys_pg_pack->offset) {
+ if (args->flags & HL_MEM_FORCE_HINT) {
+ /* Fail if hint must be respected but it can't be */
+ dev_err(hdev->dev,
+ "Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
+ hint_addr, phys_pg_pack->offset);
+ rc = -EINVAL;
+ goto va_block_err;
+ }
+ dev_dbg(hdev->dev,
+ "Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
+ hint_addr, phys_pg_pack->offset);
+ }
+
ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
- hint_addr, va_block_align);
+ hint_addr, va_block_align,
+ va_range_type, args->flags);
if (!ret_vaddr) {
dev_err(hdev->dev, "no available va block for handle %u\n",
handle);
@@ -1181,17 +1254,19 @@ init_page_pack_err:
static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
bool ctx_free)
{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ u64 vaddr = args->unmap.device_virt_addr;
struct hl_vm_hash_node *hnode = NULL;
+ struct asic_fixed_properties *prop;
+ struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
struct hl_va_range *va_range;
- u64 vaddr = args->unmap.device_virt_addr;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
bool is_userptr;
int rc = 0;
+ prop = &hdev->asic_prop;
+
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
@@ -1214,8 +1289,9 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
userptr = hnode->ptr;
- rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+
+ rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
+ false);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1299,7 +1375,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
kfree(hnode);
if (is_userptr) {
- rc = free_phys_pg_pack(hdev, phys_pg_pack);
+ free_phys_pg_pack(hdev, phys_pg_pack);
dma_unmap_host_va(hdev, userptr);
}
@@ -1669,6 +1745,7 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
+ userptr->pid = current->pid;
userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
if (!userptr->sgt)
return -ENOMEM;
@@ -2033,7 +2110,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
* another side effect error
*/
if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
- dev_notice(hdev->dev,
+ dev_dbg(hdev->dev,
"user released device without removing its memory mappings\n");
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index c5e93ff32586..0f536f79dd9c 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -470,13 +470,13 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) {
kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
- }
- /* Make sure that if we arrive here again without init was called we
- * won't cause kernel panic. This can happen for example if we fail
- * during hard reset code at certain points
- */
- hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
+ /* Make sure that if we arrive here again without init having
+ * been called, we won't cause a kernel panic. This can happen,
+ * for example, if we fail at certain points of the hard reset code
+ */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
+ }
}
/**
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index d5bedf5ba011..0b5366cc84fd 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -436,6 +436,8 @@ int hl_pci_init(struct hl_device *hdev)
goto unmap_pci_bars;
}
+ dma_set_max_seg_size(&pdev->dev, U32_MAX);
+
return 0;
unmap_pci_bars:
diff --git a/drivers/misc/habanalabs/common/state_dump.c b/drivers/misc/habanalabs/common/state_dump.c
new file mode 100644
index 000000000000..74726907c95e
--- /dev/null
+++ b/drivers/misc/habanalabs/common/state_dump.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2021 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+/**
+ * hl_format_as_binary - helper function, format an integer as binary
+ * using supplied scratch buffer
+ * @buf: the buffer to use
+ * @buf_len: buffer capacity
+ * @n: number to format
+ *
+ * Returns pointer to buffer
+ */
+char *hl_format_as_binary(char *buf, size_t buf_len, u32 n)
+{
+ int i;
+ u32 bit;
+ bool leading0 = true;
+ char *wrptr = buf;
+
+ if (buf_len > 0 && buf_len < 3) {
+ *wrptr = '\0';
+ return buf;
+ }
+
+ wrptr[0] = '0';
+ wrptr[1] = 'b';
+ wrptr += 2;
+ /* Remove 3 characters from length for '0b' and '\0' termination */
+ buf_len -= 3;
+
+ for (i = 0; i < sizeof(n) * BITS_PER_BYTE && buf_len; ++i, n <<= 1) {
+ /* Writing bit calculation in one line would cause a false
+ * positive static code analysis error, so splitting.
+ */
+ bit = n & (1 << (sizeof(n) * BITS_PER_BYTE - 1));
+ bit = !!bit;
+ leading0 &= !bit;
+ if (!leading0) {
+ *wrptr = '0' + bit;
+ ++wrptr;
+ }
+ }
+
+ *wrptr = '\0';
+
+ return buf;
+}
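
For illustration, a minimal usage sketch of this helper (the value and buffer name are arbitrary; a buffer of 35 bytes covers the "0b" prefix, 32 bits and the NUL terminator):

	char scratch[35];

	/* Prints "mask: 0b10100101"; leading zero bits are skipped */
	pr_debug("mask: %s\n", hl_format_as_binary(scratch, sizeof(scratch), 0xA5));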
+
+/**
+ * resize_to_fit - helper function, resize buffer to fit given amount of data
+ * @buf: destination buffer double pointer
+ * @size: pointer to the size container
+ * @desired_size: size the buffer must contain
+ *
+ * Returns 0 if the buffer already fits, 1 if it was resized, or a negative
+ * error code on failure. On success, the size of the buffer is at least
+ * desired_size. The buffer is allocated
+ * via vmalloc and must be freed with vfree.
+ */
+static int resize_to_fit(char **buf, size_t *size, size_t desired_size)
+{
+ char *resized_buf;
+ size_t new_size;
+
+ if (*size >= desired_size)
+ return 0;
+
+ /* Not enough space to print all, have to resize */
+ new_size = max_t(size_t, PAGE_SIZE, round_up(desired_size, PAGE_SIZE));
+ resized_buf = vmalloc(new_size);
+ if (!resized_buf)
+ return -ENOMEM;
+ memcpy(resized_buf, *buf, *size);
+ vfree(*buf);
+ *buf = resized_buf;
+ *size = new_size;
+
+ return 1;
+}
+
+/**
+ * hl_snprintf_resize() - print formatted data to buffer, resize as needed
+ * @buf: buffer double pointer, to be written to and resized, must be either
+ * NULL or allocated with vmalloc.
+ * @size: current size of the buffer
+ * @offset: current offset to write to
+ * @format: format of the data
+ *
+ * This function will write formatted data into the buffer. If buffer is not
+ * large enough, it will be resized using vmalloc. Size may be modified if the
+ * buffer was resized; offset will be advanced by the number of bytes written,
+ * not including the terminating NUL character.
+ *
+ * Returns 0 on success or error code on failure
+ *
+ * Note that the buffer has to be manually released using vfree.
+ */
+int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
+ const char *format, ...)
+{
+ va_list args;
+ size_t length;
+ int rc;
+
+ if (*buf == NULL && (*size != 0 || *offset != 0))
+ return -EINVAL;
+
+ va_start(args, format);
+ length = vsnprintf(*buf + *offset, *size - *offset, format, args);
+ va_end(args);
+
+ rc = resize_to_fit(buf, size, *offset + length + 1);
+ if (rc < 0)
+ return rc;
+ else if (rc > 0) {
+ /* Resize was needed, write again */
+ va_start(args, format);
+ length = vsnprintf(*buf + *offset, *size - *offset, format,
+ args);
+ va_end(args);
+ }
+
+ *offset += length;
+
+ return 0;
+}
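
A caller-side sketch of the grow-as-you-print pattern this function enables (names are illustrative; the buffer starts out NULL and, per the note above, must be released with vfree):

	char *buf = NULL;
	size_t size = 0, offset = 0;
	int rc;

	rc = hl_snprintf_resize(&buf, &size, &offset, "engines: %u\n", 8);
	if (!rc)
		rc = hl_snprintf_resize(&buf, &size, &offset, "done\n");

	/* buf was vmalloc'ed on first use and is freed by the caller */
	vfree(buf);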
+
+/**
+ * hl_sync_engine_to_string - convert engine type enum to string literal
+ * @engine_type: engine type (TPC/MME/DMA)
+ *
+ * Return the resolved string literal
+ */
+const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type)
+{
+ switch (engine_type) {
+ case ENGINE_DMA:
+ return "DMA";
+ case ENGINE_MME:
+ return "MME";
+ case ENGINE_TPC:
+ return "TPC";
+ }
+ return "Invalid Engine Type";
+}
+
+/**
+ * hl_print_resize_sync_engine - helper function, format engine name and ID
+ * using hl_snprintf_resize
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ * @engine_type: engine type (TPC/MME/DMA)
+ * @engine_id: engine numerical id
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_print_resize_sync_engine(char **buf, size_t *size, size_t *offset,
+ enum hl_sync_engine_type engine_type,
+ u32 engine_id)
+{
+ return hl_snprintf_resize(buf, size, offset, "%s%u",
+ hl_sync_engine_to_string(engine_type), engine_id);
+}
+
+/**
+ * hl_state_dump_get_sync_name - transform sync object id to name if available
+ * @hdev: pointer to the device
+ * @sync_id: sync object id
+ *
+ * Returns a name literal or NULL if not resolved.
+ * Note: returning NULL shall not be considered as a failure, as not all
+ * sync objects are named.
+ */
+const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_hw_obj_name_entry *entry;
+
+ hash_for_each_possible(sds->so_id_to_str_tb, entry,
+ node, sync_id)
+ if (sync_id == entry->id)
+ return entry->name;
+
+ return NULL;
+}
+
+/**
+ * hl_state_dump_get_monitor_name - transform monitor object dump to monitor
+ * name if available
+ * @hdev: pointer to the device
+ * @mon: monitor state dump
+ *
+ * Returns a name literal or NULL if not resolved.
+ * Note: returning NULL shall not be considered as a failure, as not all
+ * monitors are named.
+ */
+const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
+ struct hl_mon_state_dump *mon)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_hw_obj_name_entry *entry;
+
+ hash_for_each_possible(sds->monitor_id_to_str_tb,
+ entry, node, mon->id)
+ if (mon->id == entry->id)
+ return entry->name;
+
+ return NULL;
+}
+
+/**
+ * hl_state_dump_free_sync_to_engine_map - free sync object to engine map
+ * @map: sync object to engine map
+ *
+ * Note: generic free implementation, the allocation is implemented per ASIC.
+ */
+void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map)
+{
+ struct hl_sync_to_engine_map_entry *entry;
+ struct hlist_node *tmp_node;
+ int i;
+
+ hash_for_each_safe(map->tb, i, tmp_node, entry, node) {
+ hash_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+/**
+ * hl_state_dump_get_sync_to_engine - transform sync_id to
+ * hl_sync_to_engine_map_entry if available for current id
+ * @map: sync object to engine map
+ * @sync_id: sync object id
+ *
+ * Returns the translation entry if found or NULL if not.
+ * Note: a returned NULL shall not be considered a failure, as the map
+ * does not cover all possible sync ids; it is best effort.
+ */
+static struct hl_sync_to_engine_map_entry *
+hl_state_dump_get_sync_to_engine(struct hl_sync_to_engine_map *map, u32 sync_id)
+{
+ struct hl_sync_to_engine_map_entry *entry;
+
+ hash_for_each_possible(map->tb, entry, node, sync_id)
+ if (entry->sync_id == sync_id)
+ return entry;
+ return NULL;
+}
+
+/**
+ * hl_state_dump_read_sync_objects - read sync objects array
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ *
+ * Returns array of size SP_SYNC_OBJ_AMOUNT on success or NULL on failure
+ */
+static u32 *hl_state_dump_read_sync_objects(struct hl_device *hdev, u32 index)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ u32 *sync_objects;
+ s64 base_addr; /* Base addr can be negative */
+ int i;
+
+ base_addr = sds->props[SP_SYNC_OBJ_BASE_ADDR] +
+ sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index;
+
+ sync_objects = vmalloc(sds->props[SP_SYNC_OBJ_AMOUNT] * sizeof(u32));
+ if (!sync_objects)
+ return NULL;
+
+ for (i = 0; i < sds->props[SP_SYNC_OBJ_AMOUNT]; ++i)
+ sync_objects[i] = RREG32(base_addr + i * sizeof(u32));
+
+ return sync_objects;
+}
+
+/**
+ * hl_state_dump_free_sync_objects - free sync objects array allocated by
+ * hl_state_dump_read_sync_objects
+ * @sync_objects: sync objects array
+ */
+static void hl_state_dump_free_sync_objects(u32 *sync_objects)
+{
+ vfree(sync_objects);
+}
+
+
+/**
+ * hl_state_dump_print_syncs_single_block - print active sync objects on a
+ * single block
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ * @map: sync engines names map
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int
+hl_state_dump_print_syncs_single_block(struct hl_device *hdev, u32 index,
+ char **buf, size_t *size, size_t *offset,
+ struct hl_sync_to_engine_map *map)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ const char *sync_name;
+ u32 *sync_objects = NULL;
+ int rc = 0, i;
+
+ if (sds->sync_namager_names) {
+ rc = hl_snprintf_resize(
+ buf, size, offset, "%s\n",
+ sds->sync_namager_names[index]);
+ if (rc)
+ goto out;
+ }
+
+ sync_objects = hl_state_dump_read_sync_objects(hdev, index);
+ if (!sync_objects) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < sds->props[SP_SYNC_OBJ_AMOUNT]; ++i) {
+ struct hl_sync_to_engine_map_entry *entry;
+ u64 sync_object_addr;
+
+ if (!sync_objects[i])
+ continue;
+
+ sync_object_addr = sds->props[SP_SYNC_OBJ_BASE_ADDR] +
+ sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index +
+ i * sizeof(u32);
+
+ rc = hl_snprintf_resize(buf, size, offset, "sync id: %u", i);
+ if (rc)
+ goto free_sync_objects;
+ sync_name = hl_state_dump_get_sync_name(hdev, i);
+ if (sync_name) {
+ rc = hl_snprintf_resize(buf, size, offset, " %s",
+ sync_name);
+ if (rc)
+ goto free_sync_objects;
+ }
+ rc = hl_snprintf_resize(buf, size, offset, ", value: %u",
+ sync_objects[i]);
+ if (rc)
+ goto free_sync_objects;
+
+ /* Append engine string */
+ entry = hl_state_dump_get_sync_to_engine(map,
+ (u32)sync_object_addr);
+ if (entry) {
+ rc = hl_snprintf_resize(buf, size, offset,
+ ", Engine: ");
+ if (rc)
+ goto free_sync_objects;
+ rc = hl_print_resize_sync_engine(buf, size, offset,
+ entry->engine_type,
+ entry->engine_id);
+ if (rc)
+ goto free_sync_objects;
+ }
+
+ rc = hl_snprintf_resize(buf, size, offset, "\n");
+ if (rc)
+ goto free_sync_objects;
+ }
+
+free_sync_objects:
+ hl_state_dump_free_sync_objects(sync_objects);
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_syncs - print active sync objects
+ * @hdev: pointer to the device
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_state_dump_print_syncs(struct hl_device *hdev,
+ char **buf, size_t *size,
+ size_t *offset)
+
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_sync_to_engine_map *map;
+ u32 index;
+ int rc = 0;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ rc = sds->funcs.gen_sync_to_engine_map(hdev, map);
+ if (rc)
+ goto free_map_mem;
+
+ rc = hl_snprintf_resize(buf, size, offset, "Non zero sync objects:\n");
+ if (rc)
+ goto out;
+
+ if (sds->sync_namager_names) {
+ for (index = 0; sds->sync_namager_names[index]; ++index) {
+ rc = hl_state_dump_print_syncs_single_block(
+ hdev, index, buf, size, offset, map);
+ if (rc)
+ goto out;
+ }
+ } else {
+ for (index = 0; index < sds->props[SP_NUM_CORES]; ++index) {
+ rc = hl_state_dump_print_syncs_single_block(
+ hdev, index, buf, size, offset, map);
+ if (rc)
+ goto out;
+ }
+ }
+
+out:
+ hl_state_dump_free_sync_to_engine_map(map);
+free_map_mem:
+ kfree(map);
+
+ return rc;
+}
+
+/**
+ * hl_state_dump_alloc_read_sm_block_monitors - read monitors for a specific
+ * block
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ *
+ * Returns an array of monitor data of size SP_MONITORS_AMOUNT or NULL
+ * on error
+ */
+static struct hl_mon_state_dump *
+hl_state_dump_alloc_read_sm_block_monitors(struct hl_device *hdev, u32 index)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_mon_state_dump *monitors;
+ s64 base_addr; /* Base addr can be negative */
+ int i;
+
+ monitors = vmalloc(sds->props[SP_MONITORS_AMOUNT] *
+ sizeof(struct hl_mon_state_dump));
+ if (!monitors)
+ return NULL;
+
+ base_addr = sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index;
+
+ for (i = 0; i < sds->props[SP_MONITORS_AMOUNT]; ++i) {
+ monitors[i].id = i;
+ monitors[i].wr_addr_low =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_WR_ADDR_LOW] +
+ i * sizeof(u32));
+
+ monitors[i].wr_addr_high =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_WR_ADDR_HIGH] +
+ i * sizeof(u32));
+
+ monitors[i].wr_data =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_WR_DATA] +
+ i * sizeof(u32));
+
+ monitors[i].arm_data =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_ARM_DATA] +
+ i * sizeof(u32));
+
+ monitors[i].status =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_STATUS] +
+ i * sizeof(u32));
+ }
+
+ return monitors;
+}
+
+/**
+ * hl_state_dump_free_monitors - free the monitors structure
+ * @monitors: monitors array created with
+ * hl_state_dump_alloc_read_sm_block_monitors
+ */
+static void hl_state_dump_free_monitors(struct hl_mon_state_dump *monitors)
+{
+ vfree(monitors);
+}
+
+/**
+ * hl_state_dump_print_monitors_single_block - print active monitors on a
+ * single block
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_state_dump_print_monitors_single_block(struct hl_device *hdev,
+ u32 index,
+ char **buf, size_t *size,
+ size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_mon_state_dump *monitors = NULL;
+ int rc = 0, i;
+
+ if (sds->sync_namager_names) {
+ rc = hl_snprintf_resize(
+ buf, size, offset, "%s\n",
+ sds->sync_namager_names[index]);
+ if (rc)
+ goto out;
+ }
+
+ monitors = hl_state_dump_alloc_read_sm_block_monitors(hdev, index);
+ if (!monitors) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < sds->props[SP_MONITORS_AMOUNT]; ++i) {
+ if (!(sds->funcs.monitor_valid(&monitors[i])))
+ continue;
+
+ /* Monitor is valid, dump it */
+ rc = sds->funcs.print_single_monitor(buf, size, offset, hdev,
+ &monitors[i]);
+ if (rc)
+ goto free_monitors;
+
+ hl_snprintf_resize(buf, size, offset, "\n");
+ }
+
+free_monitors:
+ hl_state_dump_free_monitors(monitors);
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_monitors - print active monitors
+ * @hdev: pointer to the device
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_state_dump_print_monitors(struct hl_device *hdev,
+ char **buf, size_t *size,
+ size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ u32 index;
+ int rc = 0;
+
+ rc = hl_snprintf_resize(buf, size, offset,
+ "Valid (armed) monitor objects:\n");
+ if (rc)
+ goto out;
+
+ if (sds->sync_namager_names) {
+ for (index = 0; sds->sync_namager_names[index]; ++index) {
+ rc = hl_state_dump_print_monitors_single_block(
+ hdev, index, buf, size, offset);
+ if (rc)
+ goto out;
+ }
+ } else {
+ for (index = 0; index < sds->props[SP_NUM_CORES]; ++index) {
+ rc = hl_state_dump_print_monitors_single_block(
+ hdev, index, buf, size, offset);
+ if (rc)
+ goto out;
+ }
+ }
+
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_engine_fences - print active fences for a specific
+ * engine
+ * @hdev: pointer to the device
+ * @engine_type: engine type to use
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ */
+static int
+hl_state_dump_print_engine_fences(struct hl_device *hdev,
+ enum hl_sync_engine_type engine_type,
+ char **buf, size_t *size, size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ int rc = 0, i, n_fences;
+ u64 base_addr, next_fence;
+
+ switch (engine_type) {
+ case ENGINE_TPC:
+ n_fences = sds->props[SP_NUM_OF_TPC_ENGINES];
+ base_addr = sds->props[SP_TPC0_CMDQ];
+ next_fence = sds->props[SP_NEXT_TPC];
+ break;
+ case ENGINE_MME:
+ n_fences = sds->props[SP_NUM_OF_MME_ENGINES];
+ base_addr = sds->props[SP_MME_CMDQ];
+ next_fence = sds->props[SP_NEXT_MME];
+ break;
+ case ENGINE_DMA:
+ n_fences = sds->props[SP_NUM_OF_DMA_ENGINES];
+ base_addr = sds->props[SP_DMA_CMDQ];
+ next_fence = sds->props[SP_DMA_QUEUES_OFFSET];
+ break;
+ default:
+ return -EINVAL;
+ }
+ for (i = 0; i < n_fences; ++i) {
+ rc = sds->funcs.print_fences_single_engine(
+ hdev,
+ base_addr + next_fence * i +
+ sds->props[SP_FENCE0_CNT_OFFSET],
+ base_addr + next_fence * i +
+ sds->props[SP_CP_STS_OFFSET],
+ engine_type, i, buf, size, offset);
+ if (rc)
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_fences - print active fences
+ * @hdev: pointer to the device
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ */
+static int hl_state_dump_print_fences(struct hl_device *hdev, char **buf,
+ size_t *size, size_t *offset)
+{
+ int rc = 0;
+
+ rc = hl_snprintf_resize(buf, size, offset, "Valid (armed) fences:\n");
+ if (rc)
+ goto out;
+
+ rc = hl_state_dump_print_engine_fences(hdev, ENGINE_TPC, buf, size, offset);
+ if (rc)
+ goto out;
+
+ rc = hl_state_dump_print_engine_fences(hdev, ENGINE_MME, buf, size, offset);
+ if (rc)
+ goto out;
+
+ rc = hl_state_dump_print_engine_fences(hdev, ENGINE_DMA, buf, size, offset);
+ if (rc)
+ goto out;
+
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump() - dump system state
+ * @hdev: pointer to device structure
+ */
+int hl_state_dump(struct hl_device *hdev)
+{
+ char *buf = NULL;
+ size_t offset = 0, size = 0;
+ int rc;
+
+ rc = hl_snprintf_resize(&buf, &size, &offset,
+ "Timestamp taken on: %llu\n\n",
+ ktime_to_ns(ktime_get()));
+ if (rc)
+ goto err;
+
+ rc = hl_state_dump_print_syncs(hdev, &buf, &size, &offset);
+ if (rc)
+ goto err;
+
+ hl_snprintf_resize(&buf, &size, &offset, "\n");
+
+ rc = hl_state_dump_print_monitors(hdev, &buf, &size, &offset);
+ if (rc)
+ goto err;
+
+ hl_snprintf_resize(&buf, &size, &offset, "\n");
+
+ rc = hl_state_dump_print_fences(hdev, &buf, &size, &offset);
+ if (rc)
+ goto err;
+
+ hl_snprintf_resize(&buf, &size, &offset, "\n");
+
+ hl_debugfs_set_state_dump(hdev, buf, size);
+
+ return 0;
+err:
+ vfree(buf);
+ return rc;
+}
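
The assembled text blob is handed to debugfs via hl_debugfs_set_state_dump() above; a caller simply invokes the dump and logs on failure, as the Gaudi event handler later in this patch does:

	rc = hl_state_dump(hdev);
	if (rc)
		dev_err(hdev->dev, "Error during system state dump %d\n", rc);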
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index db72df282ef8..34f9f2779962 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -9,8 +9,7 @@
#include <linux/pci.h>
-long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
- bool curr)
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
struct cpucp_packet pkt;
u32 used_pll_idx;
@@ -44,8 +43,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
return (long) result;
}
-void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
- u64 freq)
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
{
struct cpucp_packet pkt;
u32 used_pll_idx;
@@ -285,16 +283,12 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
- char *str;
+ char str[HL_STR_MAX];
- if (atomic_read(&hdev->in_reset))
- str = "In reset";
- else if (hdev->disabled)
- str = "Malfunction";
- else if (hdev->needs_reset)
- str = "Needs Reset";
- else
- str = "Operational";
+ strscpy(str, hdev->status[hl_device_status(hdev)], HL_STR_MAX);
+
+ /* use uppercase for backward compatibility */
+ str[0] = 'A' + (str[0] - 'a');
return sprintf(buf, "%s\n", str);
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index aa8a0ca5aca2..383865be3c2c 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -76,7 +76,7 @@
#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
-#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
+#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_WAIT_FOR_BL_TIMEOUT_USEC 15000000 /* 15s */
@@ -106,6 +106,21 @@
#define GAUDI_PLL_MAX 10
+#define BIN_REG_STRING_SIZE sizeof("0b10101010101010101010101010101010")
+
+#define MONITOR_SOB_STRING_SIZE 256
+
+static u32 gaudi_stream_master[GAUDI_STREAM_MASTER_ARR_SIZE] = {
+ GAUDI_QUEUE_ID_DMA_0_0,
+ GAUDI_QUEUE_ID_DMA_0_1,
+ GAUDI_QUEUE_ID_DMA_0_2,
+ GAUDI_QUEUE_ID_DMA_0_3,
+ GAUDI_QUEUE_ID_DMA_1_0,
+ GAUDI_QUEUE_ID_DMA_1_1,
+ GAUDI_QUEUE_ID_DMA_1_2,
+ GAUDI_QUEUE_ID_DMA_1_3
+};
+
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -348,6 +363,97 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_3 */
};
+static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
+ { .id = 0, .name = "SYNC_OBJ_DMA_DOWN_FEEDBACK" },
+ { .id = 1, .name = "SYNC_OBJ_DMA_UP_FEEDBACK" },
+ { .id = 2, .name = "SYNC_OBJ_DMA_STATIC_DRAM_SRAM_FEEDBACK" },
+ { .id = 3, .name = "SYNC_OBJ_DMA_SRAM_DRAM_FEEDBACK" },
+ { .id = 4, .name = "SYNC_OBJ_FIRST_COMPUTE_FINISH" },
+ { .id = 5, .name = "SYNC_OBJ_HOST_DRAM_DONE" },
+ { .id = 6, .name = "SYNC_OBJ_DBG_CTR_DEPRECATED" },
+ { .id = 7, .name = "SYNC_OBJ_DMA_ACTIVATIONS_DRAM_SRAM_FEEDBACK" },
+ { .id = 8, .name = "SYNC_OBJ_ENGINE_SEM_MME_0" },
+ { .id = 9, .name = "SYNC_OBJ_ENGINE_SEM_MME_1" },
+ { .id = 10, .name = "SYNC_OBJ_ENGINE_SEM_TPC_0" },
+ { .id = 11, .name = "SYNC_OBJ_ENGINE_SEM_TPC_1" },
+ { .id = 12, .name = "SYNC_OBJ_ENGINE_SEM_TPC_2" },
+ { .id = 13, .name = "SYNC_OBJ_ENGINE_SEM_TPC_3" },
+ { .id = 14, .name = "SYNC_OBJ_ENGINE_SEM_TPC_4" },
+ { .id = 15, .name = "SYNC_OBJ_ENGINE_SEM_TPC_5" },
+ { .id = 16, .name = "SYNC_OBJ_ENGINE_SEM_TPC_6" },
+ { .id = 17, .name = "SYNC_OBJ_ENGINE_SEM_TPC_7" },
+ { .id = 18, .name = "SYNC_OBJ_ENGINE_SEM_DMA_1" },
+ { .id = 19, .name = "SYNC_OBJ_ENGINE_SEM_DMA_2" },
+ { .id = 20, .name = "SYNC_OBJ_ENGINE_SEM_DMA_3" },
+ { .id = 21, .name = "SYNC_OBJ_ENGINE_SEM_DMA_4" },
+ { .id = 22, .name = "SYNC_OBJ_ENGINE_SEM_DMA_5" },
+ { .id = 23, .name = "SYNC_OBJ_ENGINE_SEM_DMA_6" },
+ { .id = 24, .name = "SYNC_OBJ_ENGINE_SEM_DMA_7" },
+ { .id = 25, .name = "SYNC_OBJ_DBG_CTR_0" },
+ { .id = 26, .name = "SYNC_OBJ_DBG_CTR_1" },
+};
+
+static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
+ { .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
+ { .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" },
+ { .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
+ { .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
+ { .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
+ { .id = 206, .name = "MON_OBJ_TPC_2_CLK_GATE" },
+ { .id = 207, .name = "MON_OBJ_TPC_3_CLK_GATE" },
+ { .id = 208, .name = "MON_OBJ_TPC_4_CLK_GATE" },
+ { .id = 209, .name = "MON_OBJ_TPC_5_CLK_GATE" },
+ { .id = 210, .name = "MON_OBJ_TPC_6_CLK_GATE" },
+ { .id = 211, .name = "MON_OBJ_TPC_7_CLK_GATE" },
+};
+
+static s64 gaudi_state_dump_specs_props[] = {
+ [SP_SYNC_OBJ_BASE_ADDR] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0,
+ [SP_NEXT_SYNC_OBJ_ADDR] = NEXT_SYNC_OBJ_ADDR_INTERVAL,
+ [SP_SYNC_OBJ_AMOUNT] = NUM_OF_SOB_IN_BLOCK,
+ [SP_MON_OBJ_WR_ADDR_LOW] =
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0,
+ [SP_MON_OBJ_WR_ADDR_HIGH] =
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0,
+ [SP_MON_OBJ_WR_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_DATA_0,
+ [SP_MON_OBJ_ARM_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_ARM_0,
+ [SP_MON_OBJ_STATUS] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0,
+ [SP_MONITORS_AMOUNT] = NUM_OF_MONITORS_IN_BLOCK,
+ [SP_TPC0_CMDQ] = mmTPC0_QM_GLBL_CFG0,
+ [SP_TPC0_CFG_SO] = mmTPC0_CFG_QM_SYNC_OBJECT_ADDR,
+ [SP_NEXT_TPC] = mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0,
+ [SP_MME_CMDQ] = mmMME0_QM_GLBL_CFG0,
+ [SP_MME_CFG_SO] = mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL,
+ [SP_NEXT_MME] = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0,
+ [SP_DMA_CMDQ] = mmDMA0_QM_GLBL_CFG0,
+ [SP_DMA_CFG_SO] = mmDMA0_CORE_WR_COMP_ADDR_LO,
+ [SP_DMA_QUEUES_OFFSET] = mmDMA1_QM_GLBL_CFG0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_NUM_OF_MME_ENGINES] = NUM_OF_MME_ENGINES,
+ [SP_SUB_MME_ENG_NUM] = NUM_OF_MME_SUB_ENGINES,
+ [SP_NUM_OF_DMA_ENGINES] = NUM_OF_DMA_ENGINES,
+ [SP_NUM_OF_TPC_ENGINES] = NUM_OF_TPC_ENGINES,
+ [SP_ENGINE_NUM_OF_QUEUES] = NUM_OF_QUEUES,
+ [SP_ENGINE_NUM_OF_STREAMS] = NUM_OF_STREAMS,
+ [SP_ENGINE_NUM_OF_FENCES] = NUM_OF_FENCES,
+ [SP_FENCE0_CNT_OFFSET] =
+ mmDMA0_QM_CP_FENCE0_CNT_0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_FENCE0_RDATA_OFFSET] =
+ mmDMA0_QM_CP_FENCE0_RDATA_0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_CP_STS_OFFSET] = mmDMA0_QM_CP_STS_0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_NUM_CORES] = 1,
+};
+
+/* The order here is opposite to the order of the indexing in the h/w.
+ * i.e. SYNC_MGR_W_S is actually 0, SYNC_MGR_E_S is 1, etc.
+ */
+static const char * const gaudi_sync_manager_names[] = {
+ "SYNC_MGR_E_N",
+ "SYNC_MGR_W_N",
+ "SYNC_MGR_E_S",
+ "SYNC_MGR_W_S",
+ NULL
+};
+
struct ecc_info_extract_params {
u64 block_address;
u32 num_memories;
@@ -363,8 +469,6 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
u32 size, u64 val);
static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
u32 num_regs, u32 val);
-static int gaudi_schedule_register_memset(struct hl_device *hdev,
- u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val);
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
@@ -375,7 +479,6 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
-
static inline enum hl_collective_mode
get_collective_mode(struct hl_device *hdev, u32 queue_id)
{
@@ -403,7 +506,11 @@ static inline void set_default_power_values(struct hl_device *hdev)
if (hdev->card_type == cpucp_card_type_pmc) {
prop->max_power_default = MAX_POWER_DEFAULT_PMC;
- prop->dc_power_default = DC_POWER_DEFAULT_PMC;
+
+ if (prop->fw_security_enabled)
+ prop->dc_power_default = DC_POWER_DEFAULT_PMC_SEC;
+ else
+ prop->dc_power_default = DC_POWER_DEFAULT_PMC;
} else {
prop->max_power_default = MAX_POWER_DEFAULT_PCI;
prop->dc_power_default = DC_POWER_DEFAULT_PCI;
@@ -450,6 +557,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
get_collective_mode(hdev, i);
}
+ prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->collective_first_sob = 0;
prop->collective_first_mon = 0;
@@ -551,6 +659,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
+ prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+
return 0;
}
@@ -723,14 +833,14 @@ pci_init:
GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc) {
if (hdev->reset_on_preboot_fail)
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_info(hdev->dev,
"H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
}
return 0;
@@ -974,17 +1084,11 @@ static void gaudi_sob_group_hw_reset(struct kref *ref)
struct gaudi_hw_sob_group *hw_sob_group =
container_of(ref, struct gaudi_hw_sob_group, kref);
struct hl_device *hdev = hw_sob_group->hdev;
- u64 base_addr;
- int rc;
+ int i;
- base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- hw_sob_group->base_sob_id * 4;
- rc = gaudi_schedule_register_memset(hdev, hw_sob_group->queue_id,
- base_addr, NUMBER_OF_SOBS_IN_GRP, 0);
- if (rc)
- dev_err(hdev->dev,
- "failed resetting sob group - sob base %u, count %u",
- hw_sob_group->base_sob_id, NUMBER_OF_SOBS_IN_GRP);
+ for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++)
+ WREG32((mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ (hw_sob_group->base_sob_id * 4) + (i * 4)), 0);
kref_init(&hw_sob_group->kref);
}
@@ -1121,6 +1225,20 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
queue_id = job->hw_queue_id;
prop = &hdev->kernel_queues[queue_id].sync_stream_prop;
+ if (job->cs->encaps_signals) {
+ /* use the encaps signal handle stored earlier in the flow
+ * and set the SOB information from the encaps
+ * signals handle
+ */
+ hl_hw_queue_encaps_sig_set_sob_info(hdev, job->cs, job,
+ cs_cmpl);
+
+ dev_dbg(hdev->dev, "collective wait: Sequence %llu found, sob_id: %u, wait for sob_val: %u\n",
+ job->cs->sequence,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val);
+ }
+
/* Add to wait CBs using slave monitor */
wait_prop.data = (void *) job->user_cb;
wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
@@ -1131,7 +1249,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
wait_prop.size = cb_size;
dev_dbg(hdev->dev,
- "Generate slave wait CB, sob %d, val:0x%x, mon %d, q %d\n",
+ "Generate slave wait CB, sob %d, val:%x, mon %d, q %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
prop->collective_slave_mon_id, queue_id);
@@ -1145,7 +1263,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
prop->collective_sob_id, cb_size, false);
}
-static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
+static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
{
struct hl_cs_compl *signal_cs_cmpl =
container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
@@ -1163,9 +1281,37 @@ static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
gaudi = hdev->asic_specific;
cprop = &gaudi->collective_props;
- /* copy the SOB id and value of the signal CS */
- cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
- cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ /* In encaps signals case the SOB info will be retrieved from
+ * the handle in gaudi_collective_slave_init_job.
+ */
+ if (!cs->encaps_signals) {
+ /* copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ }
+
+ /* Check again if the signal cs has already completed.
+ * If so, don't send any wait cs, since the hw_sob
+ * could already be in reset. If the signal has not completed,
+ * take a refcount on the hw_sob to prevent resetting the sob
+ * while the wait cs is not yet submitted.
+ * Note that this check is protected by two locks,
+ * the hw queue lock and the completion object lock;
+ * the same completion object lock also protects
+ * the hw_sob reset handler function.
+ * The hw_queue lock prevents the hw_sob refcount value,
+ * changed by the signal/wait flows, from going out of sync.
+ */
+ spin_lock(&signal_cs_cmpl->lock);
+
+ if (completion_done(&cs->signal_fence->completion)) {
+ spin_unlock(&signal_cs_cmpl->lock);
+ return -EINVAL;
+ }
+ /* Increment kref since all slave queues are now waiting on it */
+ kref_get(&cs_cmpl->hw_sob->kref);
+
+ spin_unlock(&signal_cs_cmpl->lock);
/* Calculate the stream from collective master queue (1st job) */
job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node);
@@ -1210,21 +1356,17 @@ static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
cprop->curr_sob_group_idx[stream], stream);
}
- /* Increment kref since all slave queues are now waiting on it */
- kref_get(&cs_cmpl->hw_sob->kref);
- /*
- * Must put the signal fence after the SOB refcnt increment so
- * the SOB refcnt won't turn 0 and reset the SOB before the
- * wait CS was submitted.
- */
mb();
hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
+
+ return 0;
}
static int gaudi_collective_wait_create_job(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
- enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id)
+ enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id,
+ u32 encaps_signal_offset)
{
struct hw_queue_properties *hw_queue_prop;
struct hl_cs_counters_atomic *cntr;
@@ -1284,6 +1426,13 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
job->user_cb_size = cb_size;
job->hw_queue_id = queue_id;
+ /* since it's guaranteed to have only one chunk in the collective wait
+ * cs, we can use this chunk to set the encapsulated signal offset
+ * in the jobs.
+ */
+ if (cs->encaps_signals)
+ job->encaps_sig_wait_offset = encaps_signal_offset;
+
/*
* No need in parsing, user CB is the patched CB.
* We call hl_cb_destroy() out of two reasons - we don't need
@@ -1312,8 +1461,9 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
}
static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id)
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ u32 wait_queue_id, u32 collective_engine_id,
+ u32 encaps_signal_offset)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct hw_queue_properties *hw_queue_prop;
@@ -1363,7 +1513,8 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
if (i == 0) {
queue_id = wait_queue_id;
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
- HL_COLLECTIVE_MASTER, queue_id, wait_queue_id);
+ HL_COLLECTIVE_MASTER, queue_id,
+ wait_queue_id, encaps_signal_offset);
} else {
if (nic_idx < NIC_NUMBER_OF_ENGINES) {
if (gaudi->hw_cap_initialized &
@@ -1383,7 +1534,8 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
}
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
- HL_COLLECTIVE_SLAVE, queue_id, wait_queue_id);
+ HL_COLLECTIVE_SLAVE, queue_id,
+ wait_queue_id, encaps_signal_offset);
}
if (rc)
@@ -1431,6 +1583,11 @@ static int gaudi_late_init(struct hl_device *hdev)
return rc;
}
+ /* Scrub both SRAM and DRAM */
+ rc = hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
+ if (rc)
+ goto disable_pci_access;
+
rc = gaudi_fetch_psoc_frequency(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
@@ -1455,6 +1612,11 @@ static int gaudi_late_init(struct hl_device *hdev)
goto disable_pci_access;
}
+ /* We only support a single ASID for the user, so for the sake of optimization, just
+ * initialize the ASID one time during device initialization with the fixed value of 1
+ */
+ gaudi_mmu_prepare(hdev, 1);
+
return 0;
disable_pci_access:
@@ -1720,8 +1882,12 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->supports_sync_stream = true;
hdev->supports_coresight = true;
hdev->supports_staged_submission = true;
+ hdev->supports_wait_for_multi_cs = true;
- gaudi_set_pci_memory_regions(hdev);
+ hdev->asic_funcs->set_pci_memory_regions(hdev);
+ hdev->stream_master_qid_arr =
+ hdev->asic_funcs->get_stream_master_qid_arr();
+ hdev->stream_master_qid_arr_size = GAUDI_STREAM_MASTER_ARR_SIZE;
return 0;
@@ -2523,7 +2689,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
/* Mask all arithmetic interrupts from TPC */
- WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFF);
+ WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFE);
/* Set 16 cache lines */
WREG32_FIELD(TPC0_CFG_MSS_CONFIG, tpc_offset,
ICACHE_FETCH_LINE_NUM, 2);
@@ -3670,7 +3836,7 @@ static void gaudi_disable_timestamp(struct hl_device *hdev)
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}
-static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
+static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
@@ -3682,6 +3848,9 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
else
wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
+ if (fw_reset)
+ goto skip_engines;
+
gaudi_stop_nic_qmans(hdev);
gaudi_stop_mme_qmans(hdev);
gaudi_stop_tpc_qmans(hdev);
@@ -3707,6 +3876,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_disable_timestamp(hdev);
+skip_engines:
gaudi_disable_msi(hdev);
}
@@ -3739,6 +3909,9 @@ static int gaudi_mmu_init(struct hl_device *hdev)
WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
+ /* mem cache invalidation */
+ WREG32(mmSTLB_MEM_CACHE_INVALIDATION, 1);
+
hdev->asic_funcs->mmu_invalidate_cache(hdev, true, 0);
WREG32(mmMMU_UP_MMU_ENABLE, 1);
@@ -4071,7 +4244,7 @@ disable_queues:
return rc;
}
-static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
+static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
@@ -4092,6 +4265,14 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
}
+ if (fw_reset) {
+ dev_info(hdev->dev,
+ "Firmware performs HARD reset, going to wait %dms\n",
+ reset_timeout_ms);
+
+ goto skip_reset;
+ }
+
driver_performs_reset = !!(!hdev->asic_prop.fw_security_enabled &&
!hdev->asic_prop.hard_reset_done_by_fw);
@@ -4168,6 +4349,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
reset_timeout_ms);
}
+skip_reset:
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
* itself is in reset. Need to wait until the reset is deasserted
@@ -4212,7 +4394,7 @@ static int gaudi_resume(struct hl_device *hdev)
return gaudi_init_iatu(hdev);
}
-static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
@@ -4621,8 +4803,8 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev)
"Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
cur_addr, cur_addr + chunk_size);
- WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0);
- WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0);
+ WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0xdeadbeaf);
+ WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0xdeadbeaf);
WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
lower_32_bits(cur_addr));
WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
@@ -5796,78 +5978,6 @@ release_cb:
return rc;
}
-static int gaudi_schedule_register_memset(struct hl_device *hdev,
- u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val)
-{
- struct hl_ctx *ctx;
- struct hl_pending_cb *pending_cb;
- struct packet_msg_long *pkt;
- u32 cb_size, ctl;
- struct hl_cb *cb;
- int i, rc;
-
- mutex_lock(&hdev->fpriv_list_lock);
- ctx = hdev->compute_ctx;
-
- /* If no compute context available or context is going down
- * memset registers directly
- */
- if (!ctx || kref_read(&ctx->refcount) == 0) {
- rc = gaudi_memset_registers(hdev, reg_base, num_regs, val);
- mutex_unlock(&hdev->fpriv_list_lock);
- return rc;
- }
-
- mutex_unlock(&hdev->fpriv_list_lock);
-
- cb_size = (sizeof(*pkt) * num_regs) +
- sizeof(struct packet_msg_prot) * 2;
-
- if (cb_size > SZ_2M) {
- dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M);
- return -ENOMEM;
- }
-
- pending_cb = kzalloc(sizeof(*pending_cb), GFP_KERNEL);
- if (!pending_cb)
- return -ENOMEM;
-
- cb = hl_cb_kernel_create(hdev, cb_size, false);
- if (!cb) {
- kfree(pending_cb);
- return -EFAULT;
- }
-
- pkt = cb->kernel_address;
-
- ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
-
- for (i = 0; i < num_regs ; i++, pkt++) {
- pkt->ctl = cpu_to_le32(ctl);
- pkt->value = cpu_to_le32(val);
- pkt->addr = cpu_to_le64(reg_base + (i * 4));
- }
-
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
-
- pending_cb->cb = cb;
- pending_cb->cb_size = cb_size;
- /* The queue ID MUST be an external queue ID. Otherwise, we will
- * have undefined behavior
- */
- pending_cb->hw_queue_id = hw_queue_id;
-
- spin_lock(&ctx->pending_cb_lock);
- list_add_tail(&pending_cb->cb_node, &ctx->pending_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-
- return 0;
-}
-
static int gaudi_restore_sm_registers(struct hl_device *hdev)
{
u64 base_addr;
@@ -6013,7 +6123,7 @@ static int gaudi_restore_user_registers(struct hl_device *hdev)
static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
{
- return gaudi_restore_user_registers(hdev);
+ return 0;
}
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
@@ -6723,6 +6833,9 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
+
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@@ -6772,7 +6885,8 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET;
- WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
+ WREG32(mmDMA0_CORE_PROT + dma_offset,
+ BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT) | BIT(DMA0_CORE_PROT_VAL_SHIFT));
rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0,
job->job_cb_size, cb->bus_address);
@@ -6793,8 +6907,7 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
}
free_fence_ptr:
- WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
- ~BIT(DMA0_CORE_PROT_VAL_SHIFT));
+ WREG32(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT));
hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
fence_dma_addr);
@@ -7168,7 +7281,7 @@ static void gaudi_print_sw_config_stream_data(struct hl_device *hdev, u32 stream
cq_ptr = (((u64) RREG32(cq_ptr_hi)) << 32) | RREG32(cq_ptr_lo);
size = RREG32(cq_tsize);
- dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %x\n",
+ dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %u\n",
stream, cq_ptr, size);
}
@@ -7224,7 +7337,7 @@ static void gaudi_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
addr = le64_to_cpu(bd->ptr);
- dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %x\n",
+ dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %u\n",
stream, ci, addr, len);
/* get previous ci, wrap if needed */
@@ -7326,24 +7439,30 @@ static void gaudi_print_sm_sei_info(struct hl_device *hdev, u16 event_type,
{
u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0;
+ /* Flip the bits as the enum is ordered in the opposite way */
+ index = (index ^ 0x3) & 0x3;
+
switch (sei_data->sei_cause) {
case SM_SEI_SO_OVERFLOW:
- dev_err(hdev->dev,
- "SM %u SEI Error: SO %u overflow/underflow",
- index, le32_to_cpu(sei_data->sei_log));
+ dev_err_ratelimited(hdev->dev,
+ "%s SEI Error: SOB Group %u overflow/underflow",
+ gaudi_sync_manager_names[index],
+ le32_to_cpu(sei_data->sei_log));
break;
case SM_SEI_LBW_4B_UNALIGNED:
- dev_err(hdev->dev,
- "SM %u SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
- index, le32_to_cpu(sei_data->sei_log));
+ dev_err_ratelimited(hdev->dev,
+ "%s SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
+ gaudi_sync_manager_names[index],
+ le32_to_cpu(sei_data->sei_log));
break;
case SM_SEI_AXI_RESPONSE_ERR:
- dev_err(hdev->dev,
- "SM %u SEI Error: AXI ID %u response error",
- index, le32_to_cpu(sei_data->sei_log));
+ dev_err_ratelimited(hdev->dev,
+ "%s SEI Error: AXI ID %u response error",
+ gaudi_sync_manager_names[index],
+ le32_to_cpu(sei_data->sei_log));
break;
default:
- dev_err(hdev->dev, "Unknown SM SEI cause %u",
+ dev_err_ratelimited(hdev->dev, "Unknown SM SEI cause %u",
le32_to_cpu(sei_data->sei_log));
break;
}
@@ -7358,6 +7477,11 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
bool extract_info_from_fw;
int rc;
+ if (hdev->asic_prop.fw_security_enabled) {
+ extract_info_from_fw = true;
+ goto extract_ecc_info;
+ }
+
switch (event_type) {
case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR:
case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR:
@@ -7430,6 +7554,7 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
return;
}
+extract_ecc_info:
if (extract_info_from_fw) {
ecc_address = le64_to_cpu(ecc_data->ecc_address);
ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
@@ -7806,8 +7931,15 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
>> EQ_CTL_EVENT_TYPE_SHIFT);
- u8 cause;
bool reset_required;
+ u8 cause;
+ int rc;
+
+ if (event_type >= GAUDI_EVENT_SIZE) {
+ dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
+ event_type, GAUDI_EVENT_SIZE - 1);
+ return;
+ }
gaudi->events_stat[event_type]++;
gaudi->events_stat_aggregate[event_type]++;
@@ -7880,10 +8012,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
tpc_dec_event_to_tpc_id(event_type),
"AXI_SLV_DEC_Error");
if (reset_required) {
- dev_err(hdev->dev, "hard reset required due to %s\n",
+ dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- goto reset_device;
+ hl_device_reset(hdev, 0);
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7902,10 +8034,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
tpc_krn_event_to_tpc_id(event_type),
"KRN_ERR");
if (reset_required) {
- dev_err(hdev->dev, "hard reset required due to %s\n",
+ dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- goto reset_device;
+ hl_device_reset(hdev, 0);
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7993,6 +8125,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_sm_sei_info(hdev, event_type,
&eq_entry->sm_sei_data);
+ rc = hl_state_dump(hdev);
+ if (rc)
+ dev_err(hdev->dev,
+ "Error during system state dump %d\n", rc);
hl_fw_unmask_irq(hdev, event_type);
break;
@@ -8031,7 +8167,9 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
return;
reset_device:
- if (hdev->hard_reset_on_fw_events)
+ if (hdev->asic_prop.fw_security_enabled)
+ hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW);
+ else if (hdev->hard_reset_on_fw_events)
hl_device_reset(hdev, HL_RESET_HARD);
else
hl_fw_unmask_irq(hdev, event_type);
@@ -8563,11 +8701,20 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
static int gaudi_ctx_init(struct hl_ctx *ctx)
{
+ int rc;
+
if (ctx->asid == HL_KERNEL_ASID_ID)
return 0;
- gaudi_mmu_prepare(ctx->hdev, ctx->asid);
- return gaudi_internal_cb_pool_init(ctx->hdev, ctx);
+ rc = gaudi_internal_cb_pool_init(ctx->hdev, ctx);
+ if (rc)
+ return rc;
+
+ rc = gaudi_restore_user_registers(ctx->hdev);
+ if (rc)
+ gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
+
+ return rc;
}
static void gaudi_ctx_fini(struct hl_ctx *ctx)
@@ -8596,6 +8743,11 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
sizeof(struct packet_msg_prot) * 2;
}
+static u32 gaudi_get_sob_addr(struct hl_device *hdev, u32 sob_id)
+{
+ return mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + (sob_id * 4);
+}
+
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb)
{
@@ -8902,16 +9054,12 @@ static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
static void gaudi_reset_sob(struct hl_device *hdev, void *data)
{
struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
- int rc;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
hw_sob->sob_id);
- rc = gaudi_schedule_register_memset(hdev, hw_sob->q_idx,
- CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- hw_sob->sob_id * 4, 1, 0);
- if (rc)
- dev_err(hdev->dev, "failed resetting sob %u", hw_sob->sob_id);
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ hw_sob->sob_id * 4, 0);
kref_init(&hw_sob->kref);
}
@@ -8977,6 +9125,280 @@ static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
}
}
+static int gaudi_add_sync_to_engine_map_entry(
+ struct hl_sync_to_engine_map *map, u32 reg_value,
+ enum hl_sync_engine_type engine_type, u32 engine_id)
+{
+ struct hl_sync_to_engine_map_entry *entry;
+
+ /* Reg value represents a partial address of the sync object;
+ * it is used as a unique identifier. For this we need to
+ * strip the cfg base from the value.
+ */
+ if (reg_value == 0 || reg_value == 0xffffffff)
+ return 0;
+ reg_value -= (u32)CFG_BASE;
+
+ /* create a new hash entry */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->engine_type = engine_type;
+ entry->engine_id = engine_id;
+ entry->sync_id = reg_value;
+ hash_add(map->tb, &entry->node, reg_value);
+
+ return 0;
+}
+
+static int gaudi_gen_sync_to_engine_map(struct hl_device *hdev,
+ struct hl_sync_to_engine_map *map)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int i, j, rc;
+ u32 reg_value;
+
+ /* Iterate over TPC engines */
+ for (i = 0; i < sds->props[SP_NUM_OF_TPC_ENGINES]; ++i) {
+ /* TPC registers must be accessed with clock gating disabled */
+ mutex_lock(&gaudi->clk_gate_mutex);
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ reg_value = RREG32(sds->props[SP_TPC0_CFG_SO] +
+ sds->props[SP_NEXT_TPC] * i);
+
+ /* We can reenable clock_gating */
+ hdev->asic_funcs->set_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
+ ENGINE_TPC, i);
+ if (rc)
+ goto free_sync_to_engine_map;
+ }
+
+ /* Iterate over MME engines */
+ for (i = 0; i < sds->props[SP_NUM_OF_MME_ENGINES]; ++i) {
+ for (j = 0; j < sds->props[SP_SUB_MME_ENG_NUM]; ++j) {
+ /* MME registers must be accessed with clock gating
+ * disabled
+ */
+ mutex_lock(&gaudi->clk_gate_mutex);
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ reg_value = RREG32(sds->props[SP_MME_CFG_SO] +
+ sds->props[SP_NEXT_MME] * i +
+ j * sizeof(u32));
+
+ /* We can reenable clock_gating */
+ hdev->asic_funcs->set_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ rc = gaudi_add_sync_to_engine_map_entry(
+ map, reg_value, ENGINE_MME,
+ i * sds->props[SP_SUB_MME_ENG_NUM] + j);
+ if (rc)
+ goto free_sync_to_engine_map;
+ }
+ }
+
+ /* Iterate over DMA engines */
+ for (i = 0; i < sds->props[SP_NUM_OF_DMA_ENGINES]; ++i) {
+ reg_value = RREG32(sds->props[SP_DMA_CFG_SO] +
+ sds->props[SP_DMA_QUEUES_OFFSET] * i);
+ rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
+ ENGINE_DMA, i);
+ if (rc)
+ goto free_sync_to_engine_map;
+ }
+
+ return 0;
+
+free_sync_to_engine_map:
+ hl_state_dump_free_sync_to_engine_map(map);
+
+ return rc;
+}
+
+static int gaudi_monitor_valid(struct hl_mon_state_dump *mon)
+{
+ return FIELD_GET(
+ SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_MASK,
+ mon->status);
+}
+
+static void gaudi_fill_sobs_from_mon(char *sobs, struct hl_mon_state_dump *mon)
+{
+ const size_t max_write = 10;
+ u32 gid, mask, sob;
+ int i, offset;
+
+ /* Sync object ID is calculated as follows:
+ * (8 * group_id + cleared bits in mask)
+ */
+ gid = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
+ mon->arm_data);
+ mask = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
+ mon->arm_data);
+
+ for (i = 0, offset = 0; mask && offset < MONITOR_SOB_STRING_SIZE -
+ max_write; mask >>= 1, i++) {
+ if (!(mask & 1)) {
+ sob = gid * MONITOR_MAX_SOBS + i;
+
+ if (offset > 0)
+ offset += snprintf(sobs + offset, max_write,
+ ", ");
+
+ offset += snprintf(sobs + offset, max_write, "%u", sob);
+ }
+ }
+}
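
To make the ID arithmetic above concrete — a purely illustrative example, assuming MONITOR_MAX_SOBS is 8 per the "(8 * group_id + cleared bits in mask)" comment:

	/* gid = 3, mask = 0b11111100: bits 0 and 1 are cleared, so the
	 * monitored sync objects are 3 * 8 + 0 = 24 and 3 * 8 + 1 = 25,
	 * and the resulting string is "24, 25".
	 */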
+
+static int gaudi_print_single_monitor(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev,
+ struct hl_mon_state_dump *mon)
+{
+ const char *name;
+ char scratch_buf1[BIN_REG_STRING_SIZE],
+ scratch_buf2[BIN_REG_STRING_SIZE];
+ char monitored_sobs[MONITOR_SOB_STRING_SIZE] = {0};
+
+ name = hl_state_dump_get_monitor_name(hdev, mon);
+ if (!name)
+ name = "";
+
+ gaudi_fill_sobs_from_mon(monitored_sobs, mon);
+
+ return hl_snprintf_resize(
+ buf, size, offset,
+ "Mon id: %u%s, wait for group id: %u mask %s to reach val: %u and write %u to address 0x%llx. Pending: %s. Means sync objects [%s] are being monitored.",
+ mon->id, name,
+ FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
+ mon->arm_data),
+ hl_format_as_binary(
+ scratch_buf1, sizeof(scratch_buf1),
+ FIELD_GET(
+ SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
+ mon->arm_data)),
+ FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_MASK,
+ mon->arm_data),
+ mon->wr_data,
+ (((u64)mon->wr_addr_high) << 32) | mon->wr_addr_low,
+ hl_format_as_binary(
+ scratch_buf2, sizeof(scratch_buf2),
+ FIELD_GET(
+ SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_MASK,
+ mon->status)),
+ monitored_sobs);
+}
+
+
+static int gaudi_print_fences_single_engine(
+ struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
+ enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
+ size_t *size, size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ int rc = -ENOMEM, i;
+ u32 *statuses, *fences;
+
+ statuses = kcalloc(sds->props[SP_ENGINE_NUM_OF_QUEUES],
+ sizeof(*statuses), GFP_KERNEL);
+ if (!statuses)
+ goto out;
+
+ fences = kcalloc(sds->props[SP_ENGINE_NUM_OF_FENCES] *
+ sds->props[SP_ENGINE_NUM_OF_QUEUES],
+ sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ goto free_status;
+
+ for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES]; ++i)
+ statuses[i] = RREG32(status_base_offset + i * sizeof(u32));
+
+ for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES] *
+ sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i)
+ fences[i] = RREG32(base_offset + i * sizeof(u32));
+
+ /* The actual print */
+ for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) {
+ u32 fence_id;
+ u64 fence_cnt, fence_rdata;
+ const char *engine_name;
+
+ if (!FIELD_GET(TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_MASK,
+ statuses[i]))
+ continue;
+
+ fence_id =
+ FIELD_GET(TPC0_QM_CP_STS_0_FENCE_ID_MASK, statuses[i]);
+ fence_cnt = base_offset + CFG_BASE +
+ sizeof(u32) *
+ (i + fence_id * sds->props[SP_ENGINE_NUM_OF_QUEUES]);
+ fence_rdata = fence_cnt - sds->props[SP_FENCE0_CNT_OFFSET] +
+ sds->props[SP_FENCE0_RDATA_OFFSET];
+ engine_name = hl_sync_engine_to_string(engine_type);
+
+ rc = hl_snprintf_resize(
+ buf, size, offset,
+ "%s%u, stream %u: fence id %u cnt = 0x%llx (%s%u_QM.CP_FENCE%u_CNT_%u) rdata = 0x%llx (%s%u_QM.CP_FENCE%u_RDATA_%u) value = %u, cp_status = %u\n",
+ engine_name, engine_id,
+ i, fence_id,
+ fence_cnt, engine_name, engine_id, fence_id, i,
+ fence_rdata, engine_name, engine_id, fence_id, i,
+ fences[fence_id],
+ statuses[i]);
+ if (rc)
+ goto free_fences;
+ }
+
+ rc = 0;
+
+free_fences:
+ kfree(fences);
+free_status:
+ kfree(statuses);
+out:
+ return rc;
+}
+
+
+static struct hl_state_dump_specs_funcs gaudi_state_dump_funcs = {
+ .monitor_valid = gaudi_monitor_valid,
+ .print_single_monitor = gaudi_print_single_monitor,
+ .gen_sync_to_engine_map = gaudi_gen_sync_to_engine_map,
+ .print_fences_single_engine = gaudi_print_fences_single_engine,
+};
+
+static void gaudi_state_dump_init(struct hl_device *hdev)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gaudi_so_id_to_str); ++i)
+ hash_add(sds->so_id_to_str_tb,
+ &gaudi_so_id_to_str[i].node,
+ gaudi_so_id_to_str[i].id);
+
+ for (i = 0; i < ARRAY_SIZE(gaudi_monitor_id_to_str); ++i)
+ hash_add(sds->monitor_id_to_str_tb,
+ &gaudi_monitor_id_to_str[i].node,
+ gaudi_monitor_id_to_str[i].id);
+
+ sds->props = gaudi_state_dump_specs_props;
+
+ sds->sync_namager_names = gaudi_sync_manager_names;
+
+ sds->funcs = gaudi_state_dump_funcs;
+}
+
+static u32 *gaudi_get_stream_master_qid_arr(void)
+{
+ return gaudi_stream_master;
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -8989,7 +9411,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.halt_engines = gaudi_halt_engines,
.suspend = gaudi_suspend,
.resume = gaudi_resume,
- .cb_mmap = gaudi_cb_mmap,
+ .mmap = gaudi_mmap,
.ring_doorbell = gaudi_ring_doorbell,
.pqe_write = gaudi_pqe_write,
.asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
@@ -9062,7 +9484,11 @@ static const struct hl_asic_funcs gaudi_funcs = {
.enable_events_from_fw = gaudi_enable_events_from_fw,
.map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx,
.init_firmware_loader = gaudi_init_firmware_loader,
- .init_cpu_scrambler_dram = gaudi_init_scrambler_hbm
+ .init_cpu_scrambler_dram = gaudi_init_scrambler_hbm,
+ .state_dump_init = gaudi_state_dump_init,
+ .get_sob_addr = gaudi_get_sob_addr,
+ .set_pci_memory_regions = gaudi_set_pci_memory_regions,
+ .get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 957bf3720f70..bbbf1c343e75 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -36,6 +36,8 @@
#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + \
NUMBER_OF_CPU_HW_QUEUES)
+#define GAUDI_STREAM_MASTER_ARR_SIZE 8
+
#if (NUMBER_OF_INTERRUPTS > GAUDI_MSI_ENTRIES)
#error "Number of MSI interrupts must be smaller or equal to GAUDI_MSI_ENTRIES"
#endif
@@ -50,6 +52,8 @@
#define DC_POWER_DEFAULT_PCI 60000 /* 60W */
#define DC_POWER_DEFAULT_PMC 60000 /* 60W */
+#define DC_POWER_DEFAULT_PMC_SEC 97000 /* 97W */
+
#define GAUDI_CPU_TIMEOUT_USEC 30000000 /* 30s */
#define TPC_ENABLED_MASK 0xFF
@@ -62,7 +66,7 @@
#define DMA_MAX_TRANSFER_SIZE U32_MAX
-#define GAUDI_DEFAULT_CARD_NAME "HL2000"
+#define GAUDI_DEFAULT_CARD_NAME "HL205"
#define GAUDI_MAX_PENDING_CS SZ_16K
@@ -117,6 +121,7 @@
(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 - \
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2)
+#define MONITOR_MAX_SOBS 8
/* DRAM Memory Map */
@@ -200,6 +205,18 @@
#define HW_CAP_TPC_MASK GENMASK(31, 24)
#define HW_CAP_TPC_SHIFT 24
+#define NEXT_SYNC_OBJ_ADDR_INTERVAL \
+ (mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 - \
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0)
+#define NUM_OF_MME_ENGINES 2
+#define NUM_OF_MME_SUB_ENGINES 2
+#define NUM_OF_TPC_ENGINES 8
+#define NUM_OF_DMA_ENGINES 8
+#define NUM_OF_QUEUES 5
+#define NUM_OF_STREAMS 4
+#define NUM_OF_FENCES 4
+
+
#define GAUDI_CPU_PCI_MSB_ADDR(addr) (((addr) & GENMASK_ULL(49, 39)) >> 39)
#define GAUDI_PCI_TO_CPU_ADDR(addr) \
do { \
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index c2a27ed1c4d1..5349c1be13f9 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -622,11 +622,6 @@ static int gaudi_config_etr(struct hl_device *hdev,
return -EINVAL;
}
- gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER,
- hdev->compute_ctx->asid);
- gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER,
- hdev->compute_ctx->asid);
-
msb = upper_32_bits(input->buffer_address) >> 8;
msb &= PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 0d3240f1f7d7..cb265c00cf73 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -9559,6 +9559,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -10013,6 +10014,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -10466,6 +10468,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -10919,6 +10922,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -11372,6 +11376,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -11825,6 +11830,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -12280,6 +12286,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -12735,6 +12742,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 755e08cf2ecc..031c1849da14 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -350,6 +350,8 @@ static u32 goya_all_events[] = {
GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
};
+static s64 goya_state_dump_specs_props[SP_MAX] = {0};
+
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
@@ -387,6 +389,7 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
}
+ prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->dram_base_address = DRAM_PHYS_BASE;
@@ -466,6 +469,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
+ prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+
return 0;
}
@@ -649,14 +654,14 @@ pci_init:
GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc) {
if (hdev->reset_on_preboot_fail)
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_info(hdev->dev,
"H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
}
if (!hdev->pldm) {
@@ -955,8 +960,9 @@ static int goya_sw_init(struct hl_device *hdev)
hdev->supports_coresight = true;
hdev->supports_soft_reset = true;
hdev->allow_external_soft_reset = true;
+ hdev->supports_wait_for_multi_cs = false;
- goya_set_pci_memory_regions(hdev);
+ hdev->asic_funcs->set_pci_memory_regions(hdev);
return 0;
@@ -2374,7 +2380,7 @@ static void goya_disable_timestamp(struct hl_device *hdev)
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}
-static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
+static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
@@ -2493,6 +2499,7 @@ static void goya_init_firmware_loader(struct hl_device *hdev)
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
/* fill common fields */
+ fw_loader->linux_loaded = false;
fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE;
fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC;
@@ -2696,14 +2703,7 @@ disable_queues:
return rc;
}
-/*
- * goya_hw_fini - Goya hardware tear-down code
- *
- * @hdev: pointer to hl_device structure
- * @hard_reset: should we do hard reset to all engines or just reset the
- * compute/dma engines
- */
-static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
+static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct goya_device *goya = hdev->asic_specific;
u32 reset_timeout_ms, cpu_timeout_ms, status;
@@ -2796,7 +2796,7 @@ int goya_resume(struct hl_device *hdev)
return goya_init_iatu(hdev);
}
-static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
@@ -4797,6 +4797,12 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
>> EQ_CTL_EVENT_TYPE_SHIFT);
struct goya_device *goya = hdev->asic_specific;
+ if (event_type >= GOYA_ASYNC_EVENT_ID_SIZE) {
+ dev_err(hdev->dev, "Event type %u exceeds maximum of %u\n",
+ event_type, GOYA_ASYNC_EVENT_ID_SIZE - 1);
+ return;
+ }
+
goya->events_stat[event_type]++;
goya->events_stat_aggregate[event_type]++;
@@ -5475,14 +5481,14 @@ u64 goya_get_device_time(struct hl_device *hdev)
return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}
-static void goya_collective_wait_init_cs(struct hl_cs *cs)
+static int goya_collective_wait_init_cs(struct hl_cs *cs)
{
-
+ return 0;
}
static int goya_collective_wait_create_jobs(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id)
+ u32 collective_engine_id, u32 encaps_signal_offset)
{
return -EINVAL;
}
@@ -5524,6 +5530,62 @@ static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
}
}
+static int goya_gen_sync_to_engine_map(struct hl_device *hdev,
+ struct hl_sync_to_engine_map *map)
+{
+ /* Not implemented */
+ return 0;
+}
+
+static int goya_monitor_valid(struct hl_mon_state_dump *mon)
+{
+ /* Not implemented */
+ return 0;
+}
+
+static int goya_print_single_monitor(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev,
+ struct hl_mon_state_dump *mon)
+{
+ /* Not implemented */
+ return 0;
+}
+
+
+static int goya_print_fences_single_engine(
+ struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
+ enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
+ size_t *size, size_t *offset)
+{
+ /* Not implemented */
+ return 0;
+}
+
+
+static struct hl_state_dump_specs_funcs goya_state_dump_funcs = {
+ .monitor_valid = goya_monitor_valid,
+ .print_single_monitor = goya_print_single_monitor,
+ .gen_sync_to_engine_map = goya_gen_sync_to_engine_map,
+ .print_fences_single_engine = goya_print_fences_single_engine,
+};
+
+static void goya_state_dump_init(struct hl_device *hdev)
+{
+ /* Not implemented */
+ hdev->state_dump_specs.props = goya_state_dump_specs_props;
+ hdev->state_dump_specs.funcs = goya_state_dump_funcs;
+}
+
+static u32 goya_get_sob_addr(struct hl_device *hdev, u32 sob_id)
+{
+ return 0;
+}
+
+static u32 *goya_get_stream_master_qid_arr(void)
+{
+ return NULL;
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5536,7 +5598,7 @@ static const struct hl_asic_funcs goya_funcs = {
.halt_engines = goya_halt_engines,
.suspend = goya_suspend,
.resume = goya_resume,
- .cb_mmap = goya_cb_mmap,
+ .mmap = goya_mmap,
.ring_doorbell = goya_ring_doorbell,
.pqe_write = goya_pqe_write,
.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
@@ -5609,7 +5671,11 @@ static const struct hl_asic_funcs goya_funcs = {
.enable_events_from_fw = goya_enable_events_from_fw,
.map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx,
.init_firmware_loader = goya_init_firmware_loader,
- .init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram
+ .init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram,
+ .state_dump_init = goya_state_dump_init,
+ .get_sob_addr = &goya_get_sob_addr,
+ .set_pci_memory_regions = goya_set_pci_memory_regions,
+ .get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
};
/*
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 80b1d5a9d9f1..9ff6a448f0d4 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -98,6 +98,18 @@ struct hl_eq_fw_alive {
__u8 pad[7];
};
+enum hl_pcie_addr_dec_cause {
+ PCIE_ADDR_DEC_HBW_ERR_RESP,
+ PCIE_ADDR_DEC_LBW_ERR_RESP,
+ PCIE_ADDR_DEC_TLP_BLOCKED_BY_RR
+};
+
+struct hl_eq_pcie_addr_dec_data {
+ /* enum hl_pcie_addr_dec_cause */
+ __u8 addr_dec_cause;
+ __u8 pad[7];
+};
+
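For illustration, a minimal sketch of how a host-side consumer might translate the reported cause into a log string (assuming, as elsewhere in this driver, that HBW/LBW stand for high/low bandwidth); pcie_addr_dec_cause_str() is a hypothetical helper, not part of this interface:

	/* Hypothetical helper: map an address-decode cause to a log string. */
	static const char *pcie_addr_dec_cause_str(u8 cause)
	{
		switch (cause) {
		case PCIE_ADDR_DEC_HBW_ERR_RESP:
			return "error response on a high-bandwidth transaction";
		case PCIE_ADDR_DEC_LBW_ERR_RESP:
			return "error response on a low-bandwidth transaction";
		case PCIE_ADDR_DEC_TLP_BLOCKED_BY_RR:
			return "TLP blocked by range registers";
		default:
			return "unknown cause";
		}
	}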
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
@@ -106,6 +118,7 @@ struct hl_eq_entry {
struct hl_eq_sm_sei_data sm_sei_data;
struct cpucp_pkt_sync_err pkt_sync_err;
struct hl_eq_fw_alive fw_alive;
+ struct hl_eq_pcie_addr_dec_data pcie_addr_dec_data;
__le64 data[7];
};
};
@@ -116,7 +129,7 @@ struct hl_eq_entry {
#define EQ_CTL_READY_MASK 0x80000000
#define EQ_CTL_EVENT_TYPE_SHIFT 16
-#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000
+#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000
#define EQ_CTL_INDEX_SHIFT 0
#define EQ_CTL_INDEX_MASK 0x0000FFFF
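The shift/mask pairs compose in the usual way; a minimal sketch of the decode, mirroring how gaudi.c extracts the event type from the ctl word above (hypothetical helpers, not part of this patch):

	/* Sketch: decompose an event-queue control word into its fields. */
	static inline u32 eq_ctl_event_type(u32 ctl)
	{
		return (ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT;
	}

	static inline u32 eq_ctl_index(u32 ctl)
	{
		return (ctl & EQ_CTL_INDEX_MASK) >> EQ_CTL_INDEX_SHIFT;
	}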
@@ -300,7 +313,7 @@ enum pq_init_status {
* The packet's arguments specify the desired sensor and the field to
* set.
*
- * CPUCP_PACKET_PCIE_THROUGHPUT_GET
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET -
* Get throughput of PCIe.
* The packet's arguments specify the transaction direction (TX/RX).
* The window measurement is 10[msec], and the return value is in KB/sec.
@@ -309,19 +322,19 @@ enum pq_init_status {
 * Replay count measures the number of "replay" events, which is basically
 * the number of retries done by PCIe.
*
- * CPUCP_PACKET_TOTAL_ENERGY_GET
+ * CPUCP_PACKET_TOTAL_ENERGY_GET -
 * Total Energy is a measurement of energy from the time the FW Linux
 * is loaded. It is calculated by multiplying the average power
 * by the time passed since armcp start. The units are millijoules.
*
- * CPUCP_PACKET_PLL_INFO_GET
+ * CPUCP_PACKET_PLL_INFO_GET -
* Fetch frequencies of PLL from the required PLL IP.
* The packet's arguments specify the device PLL type
* Pll type is the PLL from device pll_index enum.
* The result is composed of 4 outputs, each is 16-bit
* frequency in MHz.
*
- * CPUCP_PACKET_POWER_GET
+ * CPUCP_PACKET_POWER_GET -
* Fetch the present power consumption of the device (Current * Voltage).
*
* CPUCP_PACKET_NIC_PFC_SET -
@@ -345,6 +358,24 @@ enum pq_init_status {
* CPUCP_PACKET_MSI_INFO_SET -
* set the index number for each supported msi type going from
* host to device
+ *
+ * CPUCP_PACKET_NIC_XPCS91_REGS_GET -
+ * Fetch the un/correctable counters values from the NIC MAC.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_GET -
+ * Fetch various NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_CLR -
+ * Clear the various NIC MAC counters in the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET -
+ * Fetch all NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_IS_IDLE_CHECK -
+ * Check if the device is IDLE with regard to the DMA/compute engines
+ * and QMANs. The f/w will return a bitmask where each bit represents
+ * a different engine or QMAN according to enum cpucp_idle_mask.
+ * The bit will be 1 if the engine is NOT idle.
*/
enum cpucp_packet_id {
@@ -385,6 +416,11 @@ enum cpucp_packet_id {
CPUCP_PACKET_NIC_LPBK_SET, /* internal */
CPUCP_PACKET_NIC_MAC_CFG, /* internal */
CPUCP_PACKET_MSI_INFO_SET, /* internal */
+ CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
+ CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
};
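As a sketch of handling the CPUCP_PACKET_IS_IDLE_CHECK result described above (the exact bit-to-engine mapping comes from enum cpucp_idle_mask, which is not shown here, so the loop below only reports raw bit positions):

	/* Sketch: a set bit in the returned mask means that engine is NOT idle. */
	static bool cpucp_report_busy_engines(u64 idle_mask)
	{
		unsigned int bit;

		for (bit = 0; bit < 64; bit++)
			if (idle_mask & BIT_ULL(bit))
				pr_debug("engine/QMAN at bit %u is not idle\n", bit);

		return idle_mask == 0;
	}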
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -414,6 +450,11 @@ enum cpucp_packet_id {
#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull
+
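A hedged sketch of how the two MAC-counter fields would pack into one packet value under these masks; the semantics of the two inputs are FW-defined and the helper is hypothetical:

	/* Sketch: pack the two MAC-counter fields into one 64-bit packet value. */
	static inline u64 cpucp_pkt_mac_cnt_val(u64 in1, u64 in2)
	{
		return ((in1 << CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT) &
			CPUCP_PKT_VAL_MAC_CNT_IN1_MASK) |
		       ((in2 << CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT) &
			CPUCP_PKT_VAL_MAC_CNT_IN2_MASK);
	}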
/* heartbeat status bits */
#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
@@ -467,7 +508,8 @@ struct cpucp_packet {
__le32 status_mask;
};
- __le32 reserved;
+ /* For NIC requests */
+ __le32 port_index;
};
struct cpucp_unmask_irq_arr_packet {
@@ -476,6 +518,12 @@ struct cpucp_unmask_irq_arr_packet {
__le32 irqs[0];
};
+struct cpucp_nic_status_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[0];
+};
+
struct cpucp_array_data_packet {
struct cpucp_packet cpucp_pkt;
__le32 length;
@@ -595,6 +643,18 @@ enum pll_index {
PLL_MAX
};
+enum rl_index {
+ TPC_RL = 0,
+ MME_RL,
+};
+
+enum pvt_index {
+ PVT_SW,
+ PVT_SE,
+ PVT_NW,
+ PVT_NE
+};
+
/* Event Queue Packets */
struct eq_generic_event {
@@ -700,6 +760,15 @@ struct cpucp_mac_addr {
__u8 mac_addr[ETH_ALEN];
};
+enum cpucp_serdes_type {
+ TYPE_1_SERDES_TYPE,
+ TYPE_2_SERDES_TYPE,
+ HLS1_SERDES_TYPE,
+ HLS1H_SERDES_TYPE,
+ UNKNOWN_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE
+};
+
struct cpucp_nic_info {
struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS];
__le64 link_mask[CPUCP_NIC_MASK_ARR_LEN];
@@ -708,6 +777,40 @@ struct cpucp_nic_info {
__le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN];
__u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
__le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le16 serdes_type; /* enum cpucp_serdes_type */
+ __u8 reserved[6];
+};
+
+/*
+ * struct cpucp_nic_status - describes the status of a NIC port.
+ * @port: NIC port index.
+ * @bad_format_cnt: e.g. CRC.
+ * @responder_out_of_sequence_psn_cnt: e.g. NAK.
+ * @high_ber_reinit: link reinit count due to high BER.
+ * @correctable_err_cnt: e.g. bit-flip.
+ * @uncorrectable_err_cnt: e.g. MAC errors.
+ * @retraining_cnt: re-training counter.
+ * @up: is port up.
+ * @pcs_link: has PCS link.
+ * @phy_ready: is PHY ready.
+ * @auto_neg: is Autoneg enabled.
+ * @timeout_retransmission_cnt: timeout retransmission events counter.
+ * @high_ber_cnt: high BER events counter.
+ */
+struct cpucp_nic_status {
+ __le32 port;
+ __le32 bad_format_cnt;
+ __le32 responder_out_of_sequence_psn_cnt;
+ __le32 high_ber_reinit;
+ __le32 correctable_err_cnt;
+ __le32 uncorrectable_err_cnt;
+ __le32 retraining_cnt;
+ __u8 up;
+ __u8 pcs_link;
+ __u8 phy_ready;
+ __u8 auto_neg;
+ __le32 timeout_retransmission_cnt;
+ __le32 high_ber_cnt;
};
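Since the counter fields are little-endian on the wire, a host-side consumer would byte-swap them before use; a minimal sketch, assuming a fully received structure (cpucp_nic_status_log() is a hypothetical helper):

	/* Sketch: log a few fields of a received cpucp_nic_status. */
	static void cpucp_nic_status_log(const struct cpucp_nic_status *s)
	{
		pr_info("port %u: up=%u pcs_link=%u phy_ready=%u auto_neg=%u\n",
			le32_to_cpu(s->port), s->up, s->pcs_link,
			s->phy_ready, s->auto_neg);
		pr_info("  correctable=%u uncorrectable=%u retraining=%u high_ber=%u\n",
			le32_to_cpu(s->correctable_err_cnt),
			le32_to_cpu(s->uncorrectable_err_cnt),
			le32_to_cpu(s->retraining_cnt),
			le32_to_cpu(s->high_ber_cnt));
	}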
#endif /* CPUCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index fa8a5ad2d438..3099653234e4 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -78,6 +78,26 @@
* CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
* should be contacted.
*
+ * CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD HALT ACK from ARC0 is not received
+ * within the specified number of retries
+ * after issuing a HALT request. ARC0
+ * appears to be in a bad reset state.
+ *
+ * CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD HALT ACK from ARC1 is not received
+ * within the specified number of retries
+ * after issuing a HALT request. ARC1
+ * appears to be in a bad reset state.
+ *
+ * CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD RUN ACK from ARC0 is not received
+ * within the specified timeout after
+ * issuing a RUN request. ARC0 appears
+ * to be in a bad reset state.
+ *
+ * CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD RUN ACK from ARC1 is not received
+ * within the specified timeout after
+ * issuing a RUN request. ARC1 appears
+ * to be in a bad reset state.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -98,6 +118,10 @@
#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11)
#define CPU_BOOT_ERR0_PLL_FAIL (1 << 12)
#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << 13)
+#define CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD (1 << 14)
+#define CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD (1 << 15)
+#define CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD (1 << 16)
+#define CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD (1 << 17)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
#define CPU_BOOT_ERR1_ENABLED (1 << 31)
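Per the comment above, CPU_BOOT_ERR0_ENABLED gates the validity of the other bits; a minimal sketch of checking the new ARC ACK errors (hypothetical helper, not part of this patch):

	/* Sketch: test the ARC ACK error bits only once the FW marks them valid. */
	static bool cpu_boot_arc_ack_failed(u32 boot_err0)
	{
		if (!(boot_err0 & CPU_BOOT_ERR0_ENABLED))
			return false;	/* error bits not yet populated by the FW */

		return !!(boot_err0 & (CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD |
				       CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD |
				       CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD |
				       CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD));
	}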
@@ -186,6 +210,10 @@
* configured and is ready for use.
* Initialized in: ppboot
*
+ * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN NIC MAC channel initialization is done
+ * by the FW, and any access to the channels goes through the FW.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled.
* FW sends to host a bitmap of supported
* PLLs.
@@ -209,6 +237,21 @@
* prevent IRQs overriding each other.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN
+ * NIC STAT and XPCS91 access is restricted
+ * and is done via FW only.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN
+ * The NIC STAT "get all" operation is supported.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN
+ * F/W checks if the device is idle by reading a defined set
+ * of registers. It returns a bitmask of all the engines,
+ * where a bit is set if the engine is not idle.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -236,10 +279,14 @@
#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << 15)
#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << 16)
#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << 17)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << 18)
#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << 19)
#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << 20)
#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << 21)
#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << 22)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << 23)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << 24)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << 25)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
#define CPU_BOOT_DEV_STS1_ENABLED (1 << 31)
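The same gating applies to the device-status bits: a capability such as the new idle check would only be trusted once the status register itself is marked enabled. A minimal sketch (hypothetical helper):

	/* Sketch: use the IS_IDLE_CHECK packet only when the FW advertises it. */
	static bool fw_supports_idle_check(u32 dev_sts0)
	{
		return (dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) &&
		       (dev_sts0 & CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN);
	}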
@@ -313,10 +360,7 @@ struct cpu_dyn_regs {
__le32 hw_state;
__le32 kmd_msg_to_cpu;
__le32 cpu_cmd_status_to_host;
- union {
- __le32 gic_host_irq_ctrl;
- __le32 gic_host_pi_upd_irq;
- };
+ __le32 gic_host_pi_upd_irq;
__le32 gic_tpc_qm_irq_ctrl;
__le32 gic_mme_qm_irq_ctrl;
__le32 gic_dma_qm_irq_ctrl;
@@ -324,7 +368,9 @@ struct cpu_dyn_regs {
__le32 gic_dma_core_irq_ctrl;
__le32 gic_host_halt_irq;
__le32 gic_host_ints_irq;
- __le32 reserved1[24]; /* reserve for future use */
+ __le32 gic_host_soft_rst_irq;
+ __le32 gic_rot_qm_irq_ctrl;
+ __le32 reserved1[22]; /* reserve for future use */
};
/* TODO: remove the desc magic after the code is updated to use message */
@@ -462,6 +508,11 @@ struct lkd_fw_comms_msg {
* Do not wait for BMC response.
*
* COMMS_LOW_PLL_OPP Initialize PLLs for low OPP.
+ *
+ * COMMS_PREP_DESC_ELBI Same as COMMS_PREP_DESC, except that the
+ * memory space is allocated in an ELBI-access-only
+ * address range.
+ *
*/
enum comms_cmd {
COMMS_NOOP = 0,
@@ -474,6 +525,7 @@ enum comms_cmd {
COMMS_GOTO_WFE = 7,
COMMS_SKIP_BMC = 8,
COMMS_LOW_PLL_OPP = 9,
+ COMMS_PREP_DESC_ELBI = 10,
COMMS_INVLD_LAST
};
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index 5bb54b34a8ae..ffdfbd9b3220 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -126,6 +126,9 @@
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 0x4F2004
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 0x4F3FFC
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x4F4000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 0x4F4800
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_DATA_0 0x4F5000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_ARM_0 0x4F5800
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 0x4F6000
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 0x4F67FC
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 9aea7e996654..acc85d3ed98b 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -449,4 +449,21 @@ enum axi_id {
#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
#define PCIE_AUX_FLR_CTRL_INT_MASK_MASK 0x2
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_SHIFT 0
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_MASK 0x1
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_SHIFT 1
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_MASK 0x1FE
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_SHIFT 0
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK 0xFF
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_SHIFT 8
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK 0xFF00
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOP_SHIFT 16
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOP_MASK 0x10000
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_SHIFT 17
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_MASK 0xFFFE0000
+#define TPC0_QM_CP_STS_0_FENCE_ID_SHIFT 20
+#define TPC0_QM_CP_STS_0_FENCE_ID_MASK 0x300000
+#define TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_MASK 0x400000
+
#endif /* GAUDI_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
index d95d4162ae2c..b9bd5a7f71eb 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -12,8 +12,6 @@
* PSOC scratch-pad registers
*/
#define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
-/* TODO: remove mmGIC_HOST_IRQ_CTRL_POLL_REG */
-#define mmGIC_HOST_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
#define mmGIC_HOST_PI_UPD_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
#define mmGIC_TPC_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
#define mmGIC_MME_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 95b1c6800a22..fe6fd34b8caf 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/utsname.h>
#define DEFAULT_COUNT 10
@@ -210,6 +211,8 @@ module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
+/* For test debug reporting. */
+char *lkdtm_kernel_info;
/* Return the crashtype number or NULL if the name is invalid */
static const struct crashtype *find_crashtype(const char *name)
@@ -490,6 +493,11 @@ static int __init lkdtm_module_init(void)
crash_count = cpoint_count;
#endif
+ /* Common initialization. */
+ lkdtm_kernel_info = kasprintf(GFP_KERNEL, "kernel (%s %s)",
+ init_uts_ns.name.release,
+ init_uts_ns.name.machine);
+
/* Handle test-specific initialization. */
lkdtm_bugs_init(&recur_count);
lkdtm_perms_init();
@@ -538,6 +546,8 @@ static void __exit lkdtm_module_exit(void)
if (lkdtm_kprobe != NULL)
unregister_kprobe(lkdtm_kprobe);
+ kfree(lkdtm_kernel_info);
+
pr_info("Crash point unregistered\n");
}
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index d7d64d9765eb..c212a253edde 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -5,17 +5,17 @@
#define pr_fmt(fmt) "lkdtm: " fmt
#include <linux/kernel.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
-#define LKDTM_KERNEL "kernel (" UTS_RELEASE " " UTS_MACHINE ")"
+extern char *lkdtm_kernel_info;
#define pr_expected_config(kconfig) \
{ \
if (IS_ENABLED(kconfig)) \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y\n"); \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
+ lkdtm_kernel_info); \
else \
- pr_warn("This is probably expected, since this " LKDTM_KERNEL " was built *without* " #kconfig "=y\n"); \
+ pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
+ lkdtm_kernel_info); \
}
#ifndef MODULE
@@ -25,24 +25,30 @@ int lkdtm_check_bool_cmdline(const char *param);
if (IS_ENABLED(kconfig)) { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
- pr_warn("This is probably expected, since this " LKDTM_KERNEL " was built with " #kconfig "=y but booted with '" param "=N'\n"); \
+ pr_warn("This is probably expected, since this %s was built with " #kconfig "=y but booted with '" param "=N'\n", \
+ lkdtm_kernel_info); \
break; \
case 1: \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y and booted with '" param "=Y'\n"); \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y and booted with '" param "=Y'\n", \
+ lkdtm_kernel_info); \
break; \
default: \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y (and booted without '" param "' specified)\n"); \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y (and booted without '" param "' specified)\n", \
+ lkdtm_kernel_info); \
} \
} else { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
- pr_warn("This is probably expected, as this " LKDTM_KERNEL " was built *without* " #kconfig "=y and booted with '" param "=N'\n"); \
+ pr_warn("This is probably expected, as this %s was built *without* " #kconfig "=y and booted with '" param "=N'\n", \
+ lkdtm_kernel_info); \
break; \
case 1: \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built *without* " #kconfig "=y but booted with '" param "=Y'\n"); \
+ pr_err("Unexpected! This %s was built *without* " #kconfig "=y but booted with '" param "=Y'\n", \
+ lkdtm_kernel_info); \
break; \
default: \
- pr_err("This is probably expected, since this " LKDTM_KERNEL " was built *without* " #kconfig "=y (and booted without '" param "' specified)\n"); \
+ pr_err("This is probably expected, since this %s was built *without* " #kconfig "=y (and booted without '" param "' specified)\n", \
+ lkdtm_kernel_info); \
break; \
} \
} \
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index d1137a95ad02..2ed7e3aaff3a 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -69,6 +69,8 @@
#define FLAG_USE_DMA BIT(0)
#define PCI_DEVICE_ID_TI_AM654 0xb00c
+#define PCI_DEVICE_ID_TI_J7200 0xb00f
+#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define is_am654_pci_dev(pdev) \
@@ -970,6 +972,12 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
@@ -979,6 +987,7 @@ static struct pci_driver pci_endpoint_test_driver = {
.id_table = pci_endpoint_test_tbl,
.probe = pci_endpoint_test_probe,
.remove = pci_endpoint_test_remove,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index 29e8a546dcd6..b9784f3da7a1 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
#define EBU_CLC 0x000
@@ -102,7 +103,6 @@
#define MAX_CS 2
-#define HZ_PER_MHZ 1000000L
#define USEC_PER_SEC 1000000L
struct ebu_nand_cs {
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 64d6dfa83122..267324889dd6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
reset_control_assert(gphy_fw->reset);
+ /* The vendor BSP uses a 200ms delay after asserting the reset line.
+ * Without this some users are observing that the PHY is not coming up
+ * on the MDIO bus.
+ */
+ msleep(200);
+
ret = request_firmware(&fw, gphy_fw->fw_name, dev);
if (ret) {
dev_err(dev, "failed to load firmware: %s, error: %i\n",
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 1f63f50f73f1..bda5a9bf4f52 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
}
static int
-qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data)
+qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
{
- struct qca8k_priv *priv = salve_bus->priv;
- struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -682,10 +680,8 @@ exit:
}
static int
-qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum)
+qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
{
- struct qca8k_priv *priv = salve_bus->priv;
- struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -727,6 +723,24 @@ exit:
}
static int
+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ struct mii_bus *bus = priv->bus;
+
+ return qca8k_mdio_write(bus, phy, regnum, data);
+}
+
+static int
+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ struct mii_bus *bus = priv->bus;
+
+ return qca8k_mdio_read(bus, phy, regnum);
+}
+
+static int
qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
struct qca8k_priv *priv = ds->priv;
@@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
bus->priv = (void *)priv;
bus->name = "qca8k slave mii";
- bus->read = qca8k_mdio_read;
- bus->write = qca8k_mdio_write;
+ bus->read = qca8k_internal_mdio_read;
+ bus->write = qca8k_internal_mdio_write;
snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
ds->index);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 8d90fed5d33e..6f0ea2facea9 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1050,7 +1050,7 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
#ifdef VORTEX_BUS_MASTER
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
- outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
+ outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
vp->tx_skb = skb;
outw(StartDMADown, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 53660bc8d6ff..9afc712f5948 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -922,13 +922,16 @@ static void __init ne_add_devices(void)
}
}
-#ifdef MODULE
static int __init ne_init(void)
{
int retval;
- ne_add_devices();
+
+ if (IS_MODULE(CONFIG_NE2000))
+ ne_add_devices();
+
retval = platform_driver_probe(&ne_driver, ne_drv_probe);
- if (retval) {
+
+ if (IS_MODULE(CONFIG_NE2000) && retval) {
if (io[0] == 0)
pr_notice("ne.c: You must supply \"io=0xNNN\""
" value(s) for ISA cards.\n");
@@ -941,18 +944,8 @@ static int __init ne_init(void)
return retval;
}
module_init(ne_init);
-#else /* MODULE */
-static int __init ne_init(void)
-{
- int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-
- /* Unregister unused platform_devices. */
- ne_loop_rm_unreg(0);
- return retval;
-}
-module_init(ne_init);
-#ifdef CONFIG_NETDEV_LEGACY_INIT
+#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT)
struct net_device * __init ne_probe(int unit)
{
int this_dev;
@@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit)
return ERR_PTR(-ENODEV);
}
#endif
-#endif /* MODULE */
static void __exit ne_exit(void)
{
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index b5df7ad5a83f..032e8922b482 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
#ifdef XMT_VIA_SKB
skb_save[i] = p->tmd_skb[i];
#endif
- buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+ buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
blen[i] = tmdp->blen;
tmdp->u.s.status = 0x0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a705e2615307..8c83973adca5 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8038,9 +8038,9 @@ bnx2_get_pci_speed(struct bnx2 *bp)
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
+ unsigned int len;
int rc, i, j;
u8 *data;
- unsigned int block_end, rosize, len;
#define BNX2_VPD_NVRAM_OFFSET 0x300
#define BNX2_VPD_LEN 128
@@ -8057,38 +8057,21 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp)
for (i = 0; i < BNX2_VPD_LEN; i += 4)
swab32s((u32 *)&data[i]);
- i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0)
- goto vpd_done;
-
- rosize = pci_vpd_lrdt_size(&data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
- block_end = i + rosize;
-
- if (block_end > BNX2_VPD_LEN)
- goto vpd_done;
-
- j = pci_vpd_find_info_keyword(data, i, rosize,
- PCI_VPD_RO_KEYWORD_MFR_ID);
+ j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &len);
if (j < 0)
goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len != 4 ||
- memcmp(&data[j], "1028", 4))
+ if (len != 4 || memcmp(&data[j], "1028", 4))
goto vpd_done;
- j = pci_vpd_find_info_keyword(data, i, rosize,
- PCI_VPD_RO_KEYWORD_VENDOR0);
+ j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &len);
if (j < 0)
goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
+ if (len > BNX2_MAX_VER_SLEN)
goto vpd_done;
memcpy(bp->fw_version, &data[j], len);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d04994840b87..e789430f407c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2407,7 +2407,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
-#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
#define VF_ACQUIRE_THRESH 3
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6d98134913cd..ae87296ae1ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12189,86 +12189,35 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
static void bnx2x_read_fwinfo(struct bnx2x *bp)
{
- int cnt, i, block_end, rodi;
- char vpd_start[BNX2X_VPD_LEN+1];
- char str_id_reg[VENDOR_ID_LEN+1];
- char str_id_cap[VENDOR_ID_LEN+1];
- char *vpd_data;
- char *vpd_extended_data = NULL;
- u8 len;
-
- cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
- memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
-
- if (cnt < BNX2X_VPD_LEN)
- goto out_not_found;
-
- /* VPD RO tag should be first tag after identifier string, hence
- * we should be able to find it in first BNX2X_VPD_LEN chars
- */
- i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0)
- goto out_not_found;
-
- block_end = i + PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&vpd_start[i]);
-
- i += PCI_VPD_LRDT_TAG_SIZE;
-
- if (block_end > BNX2X_VPD_LEN) {
- vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
- if (vpd_extended_data == NULL)
- goto out_not_found;
-
- /* read rest of vpd image into vpd_extended_data */
- memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
- cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
- block_end - BNX2X_VPD_LEN,
- vpd_extended_data + BNX2X_VPD_LEN);
- if (cnt < (block_end - BNX2X_VPD_LEN))
- goto out_not_found;
- vpd_data = vpd_extended_data;
- } else
- vpd_data = vpd_start;
+ char str_id[VENDOR_ID_LEN + 1];
+ unsigned int vpd_len, kw_len;
+ u8 *vpd_data;
+ int rodi;
- /* now vpd_data holds full vpd content in both cases */
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_MFR_ID);
- if (rodi < 0)
- goto out_not_found;
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
+ vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len);
+ if (IS_ERR(vpd_data))
+ return;
- if (len != VENDOR_ID_LEN)
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &kw_len);
+ if (rodi < 0 || kw_len != VENDOR_ID_LEN)
goto out_not_found;
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
/* vendor specific info */
- snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
- snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
- if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
- !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_VENDOR0);
- if (rodi >= 0) {
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
-
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
- if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
- memcpy(bp->fw_ver, &vpd_data[rodi], len);
- bp->fw_ver[len] = ' ';
- }
+ snprintf(str_id, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ if (!strncasecmp(str_id, &vpd_data[rodi], VENDOR_ID_LEN)) {
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &kw_len);
+ if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], kw_len);
+ bp->fw_ver[kw_len] = ' ';
}
- kfree(vpd_extended_data);
- return;
}
out_not_found:
- kfree(vpd_extended_data);
- return;
+ kfree(vpd_data);
}
static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index f255fd0b16db..6fbf735fca31 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* SR-IOV capability was enabled but there are no VFs*/
if (iov->total == 0) {
- err = -EINVAL;
+ err = 0;
goto failed;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9b86516e59a1..037767b370d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
fw_health->tmr_counter = fw_health->tmr_multiplier;
- if (!fw_health->enabled) {
+ if (!fw_health->enabled)
fw_health->last_fw_heartbeat =
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
- fw_health->last_fw_reset_cnt =
- bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- }
+ fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
fw_health->master, fw_health->last_fw_reset_cnt,
@@ -2730,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
int j;
+ if (!txr->tx_buf_ring)
+ continue;
+
for (j = 0; j < max_idx;) {
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb;
@@ -2814,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
}
skip_rx_tpa_free:
+ if (!rxr->rx_buf_ring)
+ goto skip_rx_buf_free;
+
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
dma_addr_t mapping = rx_buf->mapping;
@@ -2836,6 +2841,11 @@ skip_rx_tpa_free:
kfree(data);
}
}
+
+skip_rx_buf_free:
+ if (!rxr->rx_agg_ring)
+ goto skip_rx_agg_free;
+
for (i = 0; i < max_agg_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page;
@@ -2852,6 +2862,8 @@ skip_rx_tpa_free:
__free_page(page);
}
+
+skip_rx_agg_free:
if (rxr->rx_page) {
__free_page(rxr->rx_page);
rxr->rx_page = NULL;
@@ -2900,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
struct pci_dev *pdev = bp->pdev;
int i;
+ if (!rmem->pg_arr)
+ goto skip_pages;
+
for (i = 0; i < rmem->nr_pages; i++) {
if (!rmem->pg_arr[i])
continue;
@@ -2909,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
rmem->pg_arr[i] = NULL;
}
+skip_pages:
if (rmem->pg_tbl) {
size_t pg_tbl_size = rmem->nr_pages * 8;
@@ -3228,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
kfree(cpr->cp_desc_ring);
cpr->cp_desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
kfree(cpr->cp_desc_mapping);
cpr->cp_desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
}
static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
@@ -12207,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
+ if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
+ bp->fw_health->enabled) {
+ bp->fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ }
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
@@ -13100,66 +13125,35 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
-#define BNXT_VPD_LEN 512
static void bnxt_vpd_read_info(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- int i, len, pos, ro_size, size;
- ssize_t vpd_size;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
u8 *vpd_data;
- vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
- if (!vpd_data)
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
return;
-
- vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
- if (vpd_size <= 0) {
- netdev_err(bp->dev, "Unable to read VPD\n");
- goto exit;
- }
-
- i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- netdev_err(bp->dev, "VPD READ-Only not found\n");
- goto exit;
- }
-
- i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- netdev_err(bp->dev, "VPD READ-Only not found\n");
- goto exit;
}
- ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
- if (i + ro_size > vpd_size)
- goto exit;
-
- pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
- PCI_VPD_RO_KEYWORD_PARTNO);
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
if (pos < 0)
goto read_sn;
- len = pci_vpd_info_field_size(&vpd_data[pos]);
- pos += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len + pos > vpd_size)
- goto read_sn;
-
- size = min(len, BNXT_VPD_FLD_LEN - 1);
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
memcpy(bp->board_partno, &vpd_data[pos], size);
read_sn:
- pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
- PCI_VPD_RO_KEYWORD_SERIALNO);
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
if (pos < 0)
goto exit;
- len = pci_vpd_info_field_size(&vpd_data[pos]);
- pos += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len + pos > vpd_size)
- goto exit;
-
- size = min(len, BNXT_VPD_FLD_LEN - 1);
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
kfree(vpd_data);
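The VPD conversions in this series all follow the same shape: allocate the whole image once, then look up read-only keywords by name instead of walking tags by hand. A minimal sketch of that pattern, with read_part_number() as a hypothetical caller:

	/* Sketch: read one read-only VPD keyword with the new PCI VPD helpers. */
	static void read_part_number(struct pci_dev *pdev)
	{
		unsigned int vpd_len, kw_len;
		u8 *vpd_data;
		int pos;

		vpd_data = pci_vpd_alloc(pdev, &vpd_len);	/* whole VPD image */
		if (IS_ERR(vpd_data))
			return;

		pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
						   PCI_VPD_RO_KEYWORD_PARTNO,
						   &kw_len);
		if (pos >= 0)
			pci_info(pdev, "part number: %.*s\n", (int)kw_len,
				 &vpd_data[pos]);

		kfree(vpd_data);
	}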
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 46fae1acbeed..e6a4a768b10b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
{
struct bnxt_flower_indr_block_cb_priv *cb_priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
if (cb_priv->tunnel_netdev == netdev)
return cb_priv;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8a238e349e02..5e0e0e70d801 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12788,7 +12788,7 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
-static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
+static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
{
int i;
__be32 *buf;
@@ -12822,15 +12822,11 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
offset = TG3_NVM_VPD_OFF;
len = TG3_NVM_VPD_LEN;
}
- } else {
- len = TG3_NVM_PCI_VPD_MAX_LEN;
- }
- buf = kmalloc(len, GFP_KERNEL);
- if (buf == NULL)
- return NULL;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return NULL;
- if (magic == TG3_EEPROM_MAGIC) {
for (i = 0; i < len; i += 4) {
/* The data is in little-endian format in NVRAM.
* Use the big-endian read routines to preserve
@@ -12841,12 +12837,9 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
}
*vpdlen = len;
} else {
- ssize_t cnt;
-
- cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf);
- if (cnt < 0)
- goto error;
- *vpdlen = cnt;
+ buf = pci_vpd_alloc(tp->pdev, vpdlen);
+ if (IS_ERR(buf))
+ return NULL;
}
return buf;
@@ -12868,9 +12861,10 @@ error:
static int tg3_test_nvram(struct tg3 *tp)
{
- u32 csum, magic, len;
+ u32 csum, magic;
__be32 *buf;
int i, j, k, err = 0, size;
+ unsigned int len;
if (tg3_flag(tp, NO_NVRAM))
return 0;
@@ -13013,33 +13007,10 @@ static int tg3_test_nvram(struct tg3 *tp)
if (!buf)
return -ENOMEM;
- i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA);
- if (i > 0) {
- j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
- if (j < 0)
- goto out;
-
- if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
- goto out;
-
- i += PCI_VPD_LRDT_TAG_SIZE;
- j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
- PCI_VPD_RO_KEYWORD_CHKSUM);
- if (j > 0) {
- u8 csum8 = 0;
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
-
- for (i = 0; i <= j; i++)
- csum8 += ((u8 *)buf)[i];
-
- if (csum8)
- goto out;
- }
- }
-
- err = 0;
-
+ err = pci_vpd_check_csum(buf, len);
+ /* go on if no checksum found */
+ if (err == 1)
+ err = 0;
out:
kfree(buf);
return err;
@@ -15624,64 +15595,36 @@ skip_phy_reset:
static void tg3_read_vpd(struct tg3 *tp)
{
u8 *vpd_data;
- unsigned int block_end, rosize, len;
- u32 vpdlen;
- int j, i = 0;
+ unsigned int len, vpdlen;
+ int i;
vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
if (!vpd_data)
goto out_no_vpd;
- i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &len);
if (i < 0)
- goto out_not_found;
-
- rosize = pci_vpd_lrdt_size(&vpd_data[i]);
- block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
- i += PCI_VPD_LRDT_TAG_SIZE;
+ goto partno;
- if (block_end > vpdlen)
- goto out_not_found;
-
- j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_MFR_ID);
- if (j > 0) {
- len = pci_vpd_info_field_size(&vpd_data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len != 4 ||
- memcmp(&vpd_data[j], "1028", 4))
- goto partno;
-
- j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_VENDOR0);
- if (j < 0)
- goto partno;
+ if (len != 4 || memcmp(vpd_data + i, "1028", 4))
+ goto partno;
- len = pci_vpd_info_field_size(&vpd_data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end)
- goto partno;
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_VENDOR0, &len);
+ if (i < 0)
+ goto partno;
- if (len >= sizeof(tp->fw_ver))
- len = sizeof(tp->fw_ver) - 1;
- memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
- snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
- &vpd_data[j]);
- }
+ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
partno:
- i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_PARTNO);
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_PARTNO, &len);
if (i < 0)
goto out_not_found;
- len = pci_vpd_info_field_size(&vpd_data[i]);
-
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len > TG3_BPN_SIZE ||
- (len + i) > vpdlen)
+ if (len > TG3_BPN_SIZE)
goto out_not_found;
memcpy(tp->board_part_number, &vpd_data[i], len);
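
The tg3 conversion above shows the consumer side of the new PCI VPD API: pci_vpd_alloc() reads the whole VPD into a freshly allocated buffer, pci_vpd_find_ro_info_keyword() returns the offset of a read-only keyword's data (length via the last argument), and pci_vpd_check_csum() returns 0 for a valid checksum, 1 when the VPD carries no RV checksum field (which is why tg3_test_nvram() treats err == 1 as success), or a negative errno. A minimal sketch of the pattern, with hypothetical function and buffer names:

	static int example_read_partno(struct pci_dev *pdev, char *out, size_t outlen)
	{
		unsigned int vpd_size, kw_len;
		u8 *vpd;
		int off;

		vpd = pci_vpd_alloc(pdev, &vpd_size);	/* reads the complete VPD */
		if (IS_ERR(vpd))
			return PTR_ERR(vpd);

		off = pci_vpd_find_ro_info_keyword(vpd, vpd_size,
						   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
		if (off >= 0)
			snprintf(out, outlen, "%.*s", kw_len, vpd + off);

		kfree(vpd);
		return off < 0 ? off : 0;
	}

The same three helpers replace the open-coded tag walking in the cxgb4 and sfc hunks below.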
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 46ec4fdfd16a..1000c894064f 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2101,7 +2101,6 @@
/* Hardware Legacy NVRAM layout */
#define TG3_NVM_VPD_OFF 0x100
#define TG3_NVM_VPD_LEN 256
-#define TG3_NVM_PCI_VPD_MAX_LEN 512
/* Hardware Selfboot NVRAM layout */
#define TG3_NVM_HWSB_CFG1 0x00000004
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index 8b7b59908a1a..f66d22de5168 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
struct platform_device *plat_dev = pci_get_drvdata(pdev);
struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
- platform_device_unregister(plat_dev);
clk_unregister(plat_data->pclk);
clk_unregister(plat_data->hclk);
+ platform_device_unregister(plat_dev);
}
static const struct pci_device_id dev_id_table[] = {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index c6fe0f2a4d0e..f6396ac64006 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -526,7 +526,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
oct->irq_name_storage = NULL;
}
/* Soft reset the octeon device before exiting */
- if (oct->pci_dev->reset_fn)
+ if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE))
octeon_pci_flr(oct);
else
cn23xx_vf_ask_pf_to_do_flr(oct);
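
The liquidio hunk replaces the removed pci_dev::reset_fn flag with a probe call into the new reset API: pcie_reset_flr() with PCI_RESET_PROBE only tests whether a Function Level Reset is available and returns 0 if so. A hedged sketch of the calling convention, reset helpers hypothetical:

	if (!pcie_reset_flr(pdev, PCI_RESET_PROBE))	/* 0: FLR is supported */
		do_flr_based_reset(pdev);		/* hypothetical */
	else
		do_vendor_fallback_reset(pdev);		/* hypothetical */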
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9058f09f921e..ecea3cdd30b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -84,7 +84,6 @@ extern struct mutex uld_mutex;
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
- EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
PN_LEN = 16, /* Part Number length */
MACADDR_LEN = 12, /* MAC Address length */
@@ -391,7 +390,6 @@ struct tp_params {
struct vpd_params {
unsigned int cclk;
- u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
u8 pn[PN_LEN + 1];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 6606fb8b3e42..64144b6171d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2743,10 +2743,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
*/
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int i, ret = 0, addr;
- int ec, sn, pn, na;
- u8 *vpd, csum, base_val = 0;
- unsigned int vpdr_len, kw_offset, id_len;
+ unsigned int id_len, pn_len, sn_len, na_len;
+ int id, sn, pn, na, addr, ret = 0;
+ u8 *vpd, base_val = 0;
vpd = vmalloc(VPD_LEN);
if (!vpd)
@@ -2765,74 +2764,52 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (ret < 0)
goto out;
- if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
- dev_err(adapter->pdev_dev, "missing VPD ID string\n");
- ret = -EINVAL;
+ ret = pci_vpd_find_id_string(vpd, VPD_LEN, &id_len);
+ if (ret < 0)
goto out;
- }
+ id = ret;
- id_len = pci_vpd_lrdt_size(vpd);
- if (id_len > ID_LEN)
- id_len = ID_LEN;
-
- i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- dev_err(adapter->pdev_dev, "missing VPD-R section\n");
+ ret = pci_vpd_check_csum(vpd, VPD_LEN);
+ if (ret) {
+ dev_err(adapter->pdev_dev, "VPD checksum incorrect or missing\n");
ret = -EINVAL;
goto out;
}
- vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
- kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
- if (vpdr_len + kw_offset > VPD_LEN) {
- dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
- ret = -EINVAL;
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &sn_len);
+ if (ret < 0)
goto out;
- }
-
-#define FIND_VPD_KW(var, name) do { \
- var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
- if (var < 0) { \
- dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
- ret = -EINVAL; \
- goto out; \
- } \
- var += PCI_VPD_INFO_FLD_HDR_SIZE; \
-} while (0)
-
- FIND_VPD_KW(i, "RV");
- for (csum = 0; i >= 0; i--)
- csum += vpd[i];
+ sn = ret;
- if (csum) {
- dev_err(adapter->pdev_dev,
- "corrupted VPD EEPROM, actual csum %u\n", csum);
- ret = -EINVAL;
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
+ PCI_VPD_RO_KEYWORD_PARTNO, &pn_len);
+ if (ret < 0)
goto out;
- }
+ pn = ret;
- FIND_VPD_KW(ec, "EC");
- FIND_VPD_KW(sn, "SN");
- FIND_VPD_KW(pn, "PN");
- FIND_VPD_KW(na, "NA");
-#undef FIND_VPD_KW
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN, "NA", &na_len);
+ if (ret < 0)
+ goto out;
+ na = ret;
- memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
+ memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
strim(p->id);
- memcpy(p->ec, vpd + ec, EC_LEN);
- strim(p->ec);
- i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
- memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
strim(p->sn);
- i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
- memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+ memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
strim(p->pn);
- memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+ memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
strim((char *)p->na);
out:
vfree(vpd);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ dev_err(adapter->pdev_dev, "error reading VPD\n");
+ return ret;
+ }
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 22af3d6ce178..adc54a726661 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
+static bool page_pool_enabled = true;
+module_param(page_pool_enabled, bool, 0400);
+
#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
sizeof(struct sg_table))
#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
@@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
#define HNS3_OUTER_VLAN_TAG 2
#define HNS3_MIN_TX_LEN 33U
+#define HNS3_MIN_TUN_PKT_LEN 65U
/* hns3_pci_tbl - PCI Device ID Table
*
@@ -1424,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
l4.tcp->doff);
break;
case IPPROTO_UDP:
- if (hns3_tunnel_csum_bug(skb))
- return skb_checksum_help(skb);
+ if (hns3_tunnel_csum_bug(skb)) {
+ int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
+
+ return ret ? ret : skb_checksum_help(skb);
+ }
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -4753,7 +4760,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
goto out_with_desc_cb;
if (!HNAE3_IS_TX_RING(ring)) {
- hns3_alloc_page_pool(ring);
+ if (page_pool_enabled)
+ hns3_alloc_page_pool(ring);
ret = hns3_alloc_ring_buffers(ring);
if (ret)
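
The hns3 checksum workaround above no longer punts short tunnel packets straight to software checksumming; it first pads them to HNS3_MIN_TUN_PKT_LEN (65 bytes). Note that skb_put_padto() frees the skb on failure, so its error must be returned without touching the skb again. A sketch of the idiom, predicate and length hypothetical:

	if (needs_csum_workaround(skb)) {		/* hypothetical check */
		int ret = skb_put_padto(skb, MIN_TUN_PKT_LEN);	/* zero-pads */

		return ret ? ret : skb_checksum_help(skb);
	}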
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 61d30b2961f4..a983d012e26f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
}
bd_num = le32_to_cpu(req->bd_num);
+ if (!bd_num) {
+ dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
+ return -EINVAL;
+ }
desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc_src)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 6d5f84666fa5..a0d0fa4f6a24 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1531,9 +1531,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
unsigned int i;
- int ret;
+ int node, ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret)
@@ -1601,11 +1602,12 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_kdump_kernel_config(hdev);
- /* Set the init affinity based on pci func number */
- i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
- i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
- cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
- &hdev->affinity_mask);
+ /* Set the affinity based on numa node */
+ node = dev_to_node(&hdev->pdev->dev);
+ if (node != NUMA_NO_NODE)
+ cpumask = cpumask_of_node(node);
+
+ cpumask_copy(&hdev->affinity_mask, cpumask);
return ret;
}
@@ -8131,11 +8133,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_clear_arfs_rules(hdev);
spin_unlock_bh(&hdev->fd_rule_lock);
- /* If it is not PF reset, the firmware will disable the MAC,
+ /* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
- hdev->reset_type != HNAE3_FUNC_RESET) {
+ hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
hclge_mac_stop_phy(hdev);
hclge_update_link_status(hdev);
return;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 82e727020120..a69e892277b3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2465,6 +2465,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
hclgevf_enable_vector(&hdev->misc_vector, false);
event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+ hclgevf_clear_event_cause(hdev, clearval);
switch (event_cause) {
case HCLGEVF_VECTOR0_EVENT_RST:
@@ -2477,10 +2479,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
- hclgevf_clear_event_cause(hdev, clearval);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
hclgevf_enable_vector(&hdev->misc_vector, true);
- }
return IRQ_HANDLED;
}
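
The hclgevf reordering moves the interrupt-source clear ahead of the event handling, so an event that fires while the handler runs re-asserts instead of being silently cleared afterwards. The shape of the fixed handler, all names illustrative:

	static irqreturn_t example_misc_irq(int irq, void *data)
	{
		struct example_dev *dev = data;		/* hypothetical */
		u32 clearval;
		int cause = check_event_cause(dev, &clearval);

		if (cause != EVENT_OTHER)
			clear_event_cause(dev, clearval);	/* ack first */

		handle_event(dev, cause);			/* then handle */

		if (cause != EVENT_OTHER)
			enable_vector(dev, true);
		return IRQ_HANDLED;
	}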
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index b8a40146b895..b482f6f633bd 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1144,7 +1144,7 @@ static struct net_device * __init i82596_probe(void)
err = -ENODEV;
goto out;
}
- memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
+ memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
dev->base_addr = MVME_I596_BASE;
dev->irq = (unsigned) MVME16x_IRQ_I596;
goto found;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index d1be883f933a..8f17096e614d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -4890,6 +4890,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,

return 0;
}
+ if (adapter->failover_pending) {
+ adapter->init_done_rc = -EAGAIN;
+ netdev_dbg(netdev, "Failover pending, ignoring login response\n");
+ complete(&adapter->init_done);
+ /* login response buffer will be released on reset */
+ return 0;
+ }
+
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index eadcb9958346..3c4f08d20414 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ set_bit(ICE_FLAG_AUX_ENA, pf->flags);
ice_plug_aux_dev(pf);
}
}
@@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
ice_unplug_aux_dev(pf);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 1f2afdf6cd48..adcc9a251595 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
struct auxiliary_device *adev;
int ret;
+ /* if this PF doesn't support a technology that requires auxiliary
+ * devices, then gracefully exit
+ */
+ if (!ice_is_aux_ena(pf))
+ return 0;
+
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index b877efae61df..0e19b4d02e62 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= netdev->features;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index dde8c03c5cfe..7d56a927081d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -656,11 +656,10 @@ static const struct devlink_param enable_rdma_param =
static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
int err;
- if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return 0;
err = devlink_param_register(devlink, &enable_rdma_param);
@@ -676,9 +675,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
devlink_param_unregister(devlink, &enable_rdma_param);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 3f8a98093f8c..f9cf9fb31547 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
- return err;
+ goto err_cancel_work;
}
err = mlx5_fw_tracer_create_mkey(tracer);
@@ -1031,6 +1031,7 @@ err_notifier_unregister:
mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+err_cancel_work:
cancel_work_sync(&tracer->read_fw_strings_work);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 669a75f3537a..7b8c8187543a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 0c38c2e319be..b5ddaa82755f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
u16 vport_num, esw_owner_vhca_id;
struct netlink_ext_ack *extack;
int ifindex = upper->ifindex;
- int err;
+ int err = 0;
if (!netif_is_bridge_master(upper))
return 0;
@@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
const struct switchdev_attr *attr = port_attr_info->attr;
u16 vport_num, esw_owner_vhca_id;
- int err;
+ int err = 0;
if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
&esw_owner_vhca_id))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 51a4d80f7fa3..de03684528bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
{
struct mlx5e_rep_indr_block_priv *cb_priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv,
&rpriv->uplink_priv.tc_indr_block_priv_list,
list)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index bf0313e2682b..13056cb9757d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
- if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
+ if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
rqn = res->drop_rqn;
err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 2cfd12953909..306fb5d6a36d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
return set_pflag_cqe_based_moder(netdev, enable, true);
}
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
struct mlx5e_params new_params;
@@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
- if (new_val && !priv->profile->rx_ptp_support &&
- priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
netdev_err(priv->netdev,
"Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
return -EINVAL;
@@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
- if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (rx_filter)
new_params.ptp_rx = new_val;
if (new_params.ptp_rx == priv->channels.params.ptp_rx)
@@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_filter;
int err;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+ rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47efd858964d..3fd515e7bf30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
if (!rx_filter)
/* Reset CQE compression to Admin default */
- return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+ return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
return 0;
/* Disable CQE compression */
netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
- err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
if (err)
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9fe8e3c204d6..fe501ba88bea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
+ rcu_read_unlock();
free_match_list(match_head, ft_locked);
- err = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
curr_match->g = g;
list_add_tail(&curr_match->list, &match_head->list);
}
-out:
rcu_read_unlock();
return err;
}
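
The fs_core fix above exists because free_match_list() can sleep, which is illegal inside an RCU read-side section; the error path now drops the lock before cleanup instead of funnelling through a shared label that unlocked later. The distilled pattern, names illustrative:

	rcu_read_lock();
	list_for_each_entry_rcu(g, &tree->groups, node) {
		curr = kmalloc(sizeof(*curr), GFP_ATOMIC);
		if (!curr) {
			rcu_read_unlock();		/* unlock first... */
			free_match_list(head, ft_locked); /* ...then clean up (may sleep) */
			return -ENOMEM;
		}
		list_add_tail(&curr->list, &head->list);
	}
	rcu_read_unlock();
	return 0;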
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 49ca57c6d31d..ca5690b0a7ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
struct mlx5_core_dev *dev1;
struct mlx5_lag *ldev;
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
+
mlx5_dev_list_lock();
- ldev = mlx5_lag_dev(dev);
dev0 = ldev->pf[MLX5_LAG_P1].dev;
dev1 = ldev->pf[MLX5_LAG_P2].dev;
@@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
- mlx5_dev_list_lock();
ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
+
+ mlx5_dev_list_lock();
ldev->mode_changes_in_progress--;
mlx5_dev_list_unlock();
mlx5_queue_bond_work(ldev, 0);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 3e85b17f5857..6704f5c1aa32 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev)
err = mlxbf_gige_clean_port(priv);
if (err)
goto free_irqs;
+
+ /* Clear driver's valid_polarity to match hardware,
+ * since the above call to clean_port() resets the
+ * receive polarity used by hardware.
+ */
+ priv->valid_polarity = 0;
+
err = mlxbf_gige_rx_init(priv);
if (err)
goto free_irqs;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index c1310ea1c216..d5c485a6d284 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
int err;
u16 i;
- dma_buf = kzalloc(sizeof(*dma_buf) +
- q_depth * sizeof(struct hwc_work_request),
- GFP_KERNEL);
+ dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
if (!dma_buf)
return -ENOMEM;
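
struct_size(p, member, n) expands to sizeof(*p) plus n trailing elements of the array member, with overflow checking (it saturates so the allocation fails cleanly instead of wrapping). It presumes the struct ends in a flexible array, as hwc_dma_buf does here; roughly:

	struct hwc_dma_buf {
		/* fixed fields elided */
		struct hwc_work_request reqs[];	/* flexible array member */
	};

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);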
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 556c3495211d..64c0ef57ad42 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
if (cb_priv->netdev == netdev)
return cb_priv;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 6e5a6cc97d0e..24cd41567775 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_nvm_image_att *p_image_att)
{
enum nvm_image_type type;
+ int rc;
u32 i;
/* Translate image_id into MFW definitions */
@@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- qed_mcp_nvm_info_populate(p_hwfn);
+ rc = qed_mcp_nvm_info_populate(p_hwfn);
+ if (rc)
+ return rc;
+
for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
if (type == p_hwfn->nvm_info.image_att[i].image_type)
break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 0a2f34fc8b24..27dffa299ca6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
const struct firmware *fw = fw_info->fw;
u32 dest, *p_cache, *temp;
- int i, ret = -EIO;
__le32 *temp_le;
u8 data[16];
size_t size;
+ int i, ret;
u64 addr;
temp = vzalloc(fw->size);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4b2eca5e08e2..01ef5efd7bc2 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -119,6 +119,8 @@
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
#define MAC_SM_RST 0x0002 /* MAC status machine reset */
+#define MD_CSC 0xb6 /* MDC speed control register */
+#define MD_CSC_DEFAULT 0x0030
#define MAC_ID 0xBE /* Identifier register */
#define TX_DCNT 0x80 /* TX descriptor count */
@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
{
void __iomem *ioaddr = lp->base;
int limit = MAC_DEF_TIMEOUT;
- u16 cmd;
+ u16 cmd, md_csc;
+ md_csc = ioread16(ioaddr + MD_CSC);
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
+
+ /* Restore MDIO clock frequency */
+ if (md_csc != MD_CSC_DEFAULT)
+ iowrite16(md_csc, ioaddr + MD_CSC);
}
static void r6040_init_mac_regs(struct net_device *dev)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index a295e2621cf3..43ef4f529028 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -900,74 +900,36 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* NIC VPD information
* Called during probe to display the part number of the
- * installed NIC. VPD is potentially very large but this should
- * always appear within the first 512 bytes.
+ * installed NIC.
*/
-#define SFC_VPD_LEN 512
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
- char vpd_data[SFC_VPD_LEN];
- ssize_t vpd_size;
- int ro_start, ro_size, i, j;
-
- /* Get the vpd data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
- if (vpd_size <= 0) {
- netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
- return;
- }
-
- /* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (ro_start < 0) {
- netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
- return;
- }
-
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (i + j > vpd_size)
- j = vpd_size - i;
-
- /* Get the Part number */
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Part number not found\n");
- return;
- }
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
return;
}
- netif_info(efx, drv, efx->net_dev,
- "Part Number : %.*s\n", j, &vpd_data[i]);
-
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- j = ro_size;
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
- return;
- }
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
- if (!efx->vpd_sn)
- return;
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+ kfree(vpd_data);
}
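
kmemdup_nul() duplicates a length-delimited byte range and appends a terminating NUL, returning NULL on allocation failure — one call where the old code needed kmalloc() plus snprintf(). Generic sketch:

	char *sn = kmemdup_nul(buf + off, len, GFP_KERNEL);	/* "%.*s"-style copy */
	if (!sn)
		return -ENOMEM;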
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index e5b0d795c301..3dbea028b325 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* We need a channel per event queue, plus a VI per tx queue.
* This may be more pessimistic than it needs to be.
*/
- if (n_channels + n_xdp_ev > max_channels) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
- n_xdp_ev, n_channels, max_channels);
- netif_err(efx, drv, efx->net_dev,
- "XDP_TX and XDP_REDIRECT will not work on this interface");
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
+ if (n_channels >= max_channels) {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
} else if (n_channels + n_xdp_tx > efx->max_vis) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
- n_xdp_tx, n_channels, efx->max_vis);
- netif_err(efx, drv, efx->net_dev,
- "XDP_TX and XDP_REDIRECT will not work on this interface");
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
+ n_xdp_tx, n_channels, efx->max_vis);
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+ } else if (n_channels + n_xdp_ev > max_channels) {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+
+ n_xdp_ev = max_channels - n_channels;
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
+ DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
} else {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
+ }
+
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
efx->n_xdp_channels = n_xdp_ev;
efx->xdp_tx_per_channel = tx_per_ev;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
"Allocating %d TX and %d event queues for XDP\n",
- n_xdp_tx, n_xdp_ev);
+ n_xdp_ev * tx_per_ev, n_xdp_ev);
+ } else {
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = n_xdp_tx;
}
if (vec_count < n_channels) {
@@ -858,6 +872,20 @@ rollback:
goto out;
}
+static inline int
+efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+ struct efx_tx_queue *tx_queue)
+{
+ if (xdp_queue_number >= efx->xdp_tx_queue_count)
+ return -EINVAL;
+
+ netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+ tx_queue->channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ return 0;
+}
+
int efx_set_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
@@ -896,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx)
if (efx_channel_is_xdp_tx(channel)) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
-
- /* We may have a few left-over XDP TX
- * queues owing to xdp_tx_queue_count
- * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
- * We still allocate and probe those
- * TXQs, but never use them.
- */
- if (xdp_queue_number < efx->xdp_tx_queue_count) {
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
xdp_queue_number++;
- }
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -918,10 +935,35 @@ int efx_set_channels(struct efx_nic *efx)
channel->channel, tx_queue->label,
tx_queue->queue);
}
+
+ /* If XDP is borrowing queues from net stack, it must use the queue
+ * with no csum offload, which is the first one of the channel
+ * (note: channel->tx_queue_by_type is not initialized yet)
+ */
+ if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+ tx_queue = &channel->tx_queue[0];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
}
}
}
- WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number > efx->xdp_tx_queue_count);
+
+ /* If we have more CPUs than assigned XDP TX queues, assign the already
+ * existing queues to the exceeding CPUs
+ */
+ next_queue = 0;
+ while (xdp_queue_number < efx->xdp_tx_queue_count) {
+ tx_queue = efx->xdp_tx_queues[next_queue++];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
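
The ladder above replaces the old hard failure ("XDP_TX and XDP_REDIRECT will not work") with three fallback levels. Distilled from efx_allocate_msix_channels(), using the enum added to net_driver.h later in this patch:

	if (n_channels >= max_channels)
		mode = EFX_XDP_TX_QUEUES_BORROWED;	/* no room: reuse net-stack TXQs */
	else if (n_channels + n_xdp_tx > efx->max_vis)
		mode = EFX_XDP_TX_QUEUES_BORROWED;	/* not enough VIs for XDP TXQs */
	else if (n_channels + n_xdp_ev > max_channels)
		mode = EFX_XDP_TX_QUEUES_SHARED;	/* XDP TXQs serve more than one CPU */
	else
		mode = EFX_XDP_TX_QUEUES_DEDICATED;	/* one XDP TXQ per CPU */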
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index c177ea0f301e..423bdf81200f 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2780,75 +2780,36 @@ static void ef4_pci_remove(struct pci_dev *pci_dev)
};
/* NIC VPD information
- * Called during probe to display the part number of the
- * installed NIC. VPD is potentially very large but this should
- * always appear within the first 512 bytes.
+ * Called during probe to display the part number of the installed NIC.
*/
-#define SFC_VPD_LEN 512
static void ef4_probe_vpd_strings(struct ef4_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
- char vpd_data[SFC_VPD_LEN];
- ssize_t vpd_size;
- int ro_start, ro_size, i, j;
-
- /* Get the vpd data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
- if (vpd_size <= 0) {
- netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
- return;
- }
-
- /* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (ro_start < 0) {
- netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
- return;
- }
-
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (i + j > vpd_size)
- j = vpd_size - i;
-
- /* Get the Part number */
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Part number not found\n");
- return;
- }
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
return;
}
- netif_info(efx, drv, efx->net_dev,
- "Part Number : %.*s\n", j, &vpd_data[i]);
-
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- j = ro_size;
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
- return;
- }
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_warn(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
- if (!efx->vpd_sn)
- return;
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_warn(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+ kfree(vpd_data);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9b4b25704271..f6981810039d 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -782,6 +782,12 @@ struct efx_async_filter_insertion {
#define EFX_RPS_MAX_IN_FLIGHT 8
#endif /* CONFIG_RFS_ACCEL */
+enum efx_xdp_tx_queues_mode {
+ EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */
+ EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */
+ EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */
+};
+
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@@ -820,6 +826,7 @@ struct efx_async_filter_insertion {
* should be allocated for this NIC
* @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
* @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
+ * @xdp_txq_queues_mode: XDP TX queues sharing strategy.
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
* @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -979,6 +986,7 @@ struct efx_nic {
unsigned int xdp_tx_queue_count;
struct efx_tx_queue **xdp_tx_queues;
+ enum efx_xdp_tx_queues_mode xdp_txq_queues_mode;
unsigned rxq_entries;
unsigned txq_entries;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 0c6650d2e239..d16e031e95f4 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -428,23 +428,32 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
unsigned int len;
int space;
int cpu;
- int i;
+ int i = 0;
- cpu = raw_smp_processor_id();
+ if (unlikely(n && !xdpfs))
+ return -EINVAL;
+ if (unlikely(!n))
+ return 0;
- if (!efx->xdp_tx_queue_count ||
- unlikely(cpu >= efx->xdp_tx_queue_count))
+ cpu = raw_smp_processor_id();
+ if (unlikely(cpu >= efx->xdp_tx_queue_count))
return -EINVAL;
tx_queue = efx->xdp_tx_queues[cpu];
if (unlikely(!tx_queue))
return -EINVAL;
- if (unlikely(n && !xdpfs))
- return -EINVAL;
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
- if (!n)
- return 0;
+ /* If we're borrowing net stack queues we have to handle stop-restart
+ * or we might block the queue and it will be considered as frozen
+ */
+ if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+ if (netif_tx_queue_stopped(tx_queue->core_txq))
+ goto unlock;
+ efx_tx_maybe_stop_queue(tx_queue);
+ }
/* Check for available space. We should never need multiple
* descriptors per frame.
@@ -484,6 +493,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
if (flush && i > 0)
efx_nic_push_buffers(tx_queue);
+unlock:
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
+
return i == 0 ? -EIO : i;
}
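
Locking in efx_xdp_tx_buffers() follows directly from the queue mode: dedicated per-CPU queues need none, while shared and borrowed queues can be hit from several CPUs at once and take the stack's per-queue lock; borrowed queues must additionally honor stop/restart so the net stack never sees a frozen queue. In outline:

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);

	/* ... post XDP frames to tx_queue ... */

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);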
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ece02b35a6ce..553c4403258a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_100_150M;
else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
priv->clk_csr = STMMAC_CSR_150_250M;
- else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
}
@@ -7118,7 +7118,6 @@ int stmmac_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
u32 chan;
- int ret;
if (!ndev || !netif_running(ndev))
return 0;
@@ -7150,13 +7149,6 @@ int stmmac_suspend(struct device *dev)
} else {
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
- /* Disable clock in case of PWM is off */
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
- ret = pm_runtime_force_suspend(dev);
- if (ret) {
- mutex_unlock(&priv->lock);
- return ret;
- }
}
mutex_unlock(&priv->lock);
@@ -7242,12 +7234,6 @@ int stmmac_resume(struct device *dev)
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
- /* enable the clk previously disabled */
- ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
- if (priv->plat->clk_ptp_ref)
- clk_prepare_enable(priv->plat->clk_ptp_ref);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5ca710844cc1..62cec9bfcd33 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -9,6 +9,7 @@
*******************************************************************************/
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
return stmmac_bus_clks_config(priv, true);
}
+static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ /* Disable clock in case of PWM is off */
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ /* enable the clk previously disabled */
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ clk_prepare_enable(priv->plat->clk_ptp_ref);
+ }
+
+ return 0;
+}
+
const struct dev_pm_ops stmmac_pltfr_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
};
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
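
The clock handling removed from stmmac_suspend()/stmmac_resume() earlier in this patch reappears here as noirq-phase callbacks: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() registers handlers that run after device interrupts are disabled, late enough for pm_runtime_force_suspend(). The resulting ops-table shape, handler names illustrative:

	static const struct dev_pm_ops example_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(drv_suspend, drv_resume)
		SET_RUNTIME_PM_OPS(drv_rt_suspend, drv_rt_resume, NULL)
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(drv_noirq_suspend, drv_noirq_resume)
	};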
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 8fe8887d506a..6192244b304a 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -68,9 +68,9 @@
#define SIXP_DAMA_OFF 0
/* default level 2 parameters */
-#define SIXP_TXDELAY (HZ/4) /* in 1 s */
+#define SIXP_TXDELAY 25 /* 250 ms */
#define SIXP_PERSIST 50 /* in 256ths */
-#define SIXP_SLOTTIME (HZ/10) /* in 1 s */
+#define SIXP_SLOTTIME 10 /* 100 ms */
#define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */
#define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2324e1b93e37..1da334f54944 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
* table region determines the number of entries it has.
*/
if (filter) {
- count = hweight32(ipa->filter_map);
+ /* Include one extra "slot" to hold the filter map itself */
+ count = 1 + hweight32(ipa->filter_map);
hash_count = hash_mem->size ? count : 0;
} else {
count = mem->size / sizeof(__le64);
diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
index 21aa24c741b9..daae7fa58fb8 100644
--- a/drivers/net/phy/dp83640_reg.h
+++ b/drivers/net/phy/dp83640_reg.h
@@ -5,7 +5,7 @@
#ifndef HAVE_DP83640_REGISTERS
#define HAVE_DP83640_REGISTERS
-#define PAGE0 0x0000
+/* #define PAGE0 0x0000 */
#define PHYCR2 0x001c /* PHY Control Register 2 */
#define PAGE4 0x0004
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9e2891d8e8dd..ba5ad86ec826 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -233,9 +233,11 @@ static DEFINE_MUTEX(phy_fixup_lock);
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
+ struct device_driver *drv = phydev->mdio.dev.driver;
+ struct phy_driver *phydrv = to_phy_driver(drv);
struct net_device *netdev = phydev->attached_dev;
- if (!phydev->drv->suspend)
+ if (!drv || !phydrv->suspend)
return false;
/* PHY not attached? May suspend if the PHY has not already been
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a1464b764d4d..0a0abe8e4be0 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1607,6 +1607,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
return -EINVAL;
+ /* If this link is with an SFP, ensure that changes to advertised modes
+ * also cause the associated interface to be selected such that the
+ * link can be configured correctly.
+ */
+ if (pl->sfp_port && pl->sfp_bus) {
+ config.interface = sfp_select_interface(pl->sfp_bus,
+ config.advertising);
+ if (config.interface == PHY_INTERFACE_MODE_NA) {
+ phylink_err(pl,
+ "selection of interface failed, advertisement %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ config.advertising);
+ return -EINVAL;
+ }
+
+ /* Revalidate with the selected interface */
+ linkmode_copy(support, pl->supported);
+ if (phylink_validate(pl, support, &config)) {
+ phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
+ phylink_an_mode_str(pl->cur_link_an_mode),
+ phy_modes(config.interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ return -EINVAL;
+ }
+ }
+
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@@ -2186,7 +2212,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
- changed = !linkmode_equal(pl->supported, support);
+ changed = !linkmode_equal(pl->supported, support) ||
+ !linkmode_equal(pl->link_config.advertising,
+ config.advertising);
if (changed) {
linkmode_copy(pl->supported, support);
linkmode_copy(pl->link_config.advertising, config.advertising);
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index f6b92efffc94..480bcd1f6c1c 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o
clean-files := wanxlfw.inc
$(obj)/wanxl.o: $(obj)/wanxlfw.inc
+CROSS_COMPILE_M68K = m68k-linux-gnu-
+
ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
ifeq ($(ARCH),m68k)
M68KCC = $(CC)
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 9251441fd8a3..7f473f9db300 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -346,29 +346,45 @@ static bool preamble_next(struct nvdimm_drvdata *ndd,
free, nslot);
}
+static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ u64 sum, sum_save;
+
+ if (!namespace_label_has(ndd, checksum))
+ return true;
+
+ sum_save = nsl_get_checksum(ndd, nd_label);
+ nsl_set_checksum(ndd, nd_label, 0);
+ sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+ nsl_set_checksum(ndd, nd_label, sum_save);
+ return sum == sum_save;
+}
+
+static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ u64 sum;
+
+ if (!namespace_label_has(ndd, checksum))
+ return;
+ nsl_set_checksum(ndd, nd_label, 0);
+ sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+ nsl_set_checksum(ndd, nd_label, sum);
+}
+
static bool slot_valid(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, u32 slot)
{
+ bool valid;
+
/* check that we are written where we expect to be written */
- if (slot != __le32_to_cpu(nd_label->slot))
+ if (slot != nsl_get_slot(ndd, nd_label))
return false;
-
- /* check checksum */
- if (namespace_label_has(ndd, checksum)) {
- u64 sum, sum_save;
-
- sum_save = __le64_to_cpu(nd_label->checksum);
- nd_label->checksum = __cpu_to_le64(0);
- sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
- nd_label->checksum = __cpu_to_le64(sum_save);
- if (sum != sum_save) {
- dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
- slot, sum);
- return false;
- }
- }
-
- return true;
+ valid = nsl_validate_checksum(ndd, nd_label);
+ if (!valid)
+ dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
+ return valid;
}
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
@@ -395,13 +411,13 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
continue;
memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
- flags = __le32_to_cpu(nd_label->flags);
+ flags = nsl_get_flags(ndd, nd_label);
if (test_bit(NDD_NOBLK, &nvdimm->flags))
flags &= ~NSLABEL_FLAG_LOCAL;
nd_label_gen_id(&label_id, label_uuid, flags);
res = nvdimm_allocate_dpa(ndd, &label_id,
- __le64_to_cpu(nd_label->dpa),
- __le64_to_cpu(nd_label->rawsize));
+ nsl_get_dpa(ndd, nd_label),
+ nsl_get_rawsize(ndd, nd_label));
nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
if (!res)
return -EBUSY;
@@ -548,9 +564,9 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd)
nd_label = to_label(ndd, slot);
if (!slot_valid(ndd, nd_label, slot)) {
- u32 label_slot = __le32_to_cpu(nd_label->slot);
- u64 size = __le64_to_cpu(nd_label->rawsize);
- u64 dpa = __le64_to_cpu(nd_label->dpa);
+ u32 label_slot = nsl_get_slot(ndd, nd_label);
+ u64 size = nsl_get_rawsize(ndd, nd_label);
+ u64 dpa = nsl_get_dpa(ndd, nd_label);
dev_dbg(ndd->dev,
"slot%d invalid slot: %d dpa: %llx size: %llx\n",
@@ -708,7 +724,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
- (unsigned long) to_namespace_index(ndd, 0);
}
-enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
+static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
if (guid_equal(guid, &nvdimm_btt_guid))
return NVDIMM_CCLASS_BTT;
@@ -756,6 +772,45 @@ static void reap_victim(struct nd_mapping *nd_mapping,
victim->label = NULL;
}
+static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, guid_t *guid)
+{
+ if (namespace_label_has(ndd, type_guid))
+ guid_copy(&nd_label->type_guid, guid);
+}
+
+bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, guid_t *guid)
+{
+ if (!namespace_label_has(ndd, type_guid))
+ return true;
+ if (!guid_equal(&nd_label->type_guid, guid)) {
+ dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
+ &nd_label->type_guid);
+ return false;
+ }
+ return true;
+}
+
+static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ enum nvdimm_claim_class claim_class)
+{
+ if (!namespace_label_has(ndd, abstraction_guid))
+ return;
+ guid_copy(&nd_label->abstraction_guid,
+ to_abstraction_guid(claim_class,
+ &nd_label->abstraction_guid));
+}
+
+enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ if (!namespace_label_has(ndd, abstraction_guid))
+ return NVDIMM_CCLASS_NONE;
+ return to_nvdimm_cclass(&nd_label->abstraction_guid);
+}
+
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
@@ -797,29 +852,18 @@ static int __pmem_label_update(struct nd_region *nd_region,
nd_label = to_label(ndd, slot);
memset(nd_label, 0, sizeof_namespace_label(ndd));
memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
- if (nspm->alt_name)
- memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
- nd_label->flags = __cpu_to_le32(flags);
- nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
- nd_label->position = __cpu_to_le16(pos);
- nd_label->isetcookie = __cpu_to_le64(cookie);
- nd_label->rawsize = __cpu_to_le64(resource_size(res));
- nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
- nd_label->dpa = __cpu_to_le64(res->start);
- nd_label->slot = __cpu_to_le32(slot);
- if (namespace_label_has(ndd, type_guid))
- guid_copy(&nd_label->type_guid, &nd_set->type_guid);
- if (namespace_label_has(ndd, abstraction_guid))
- guid_copy(&nd_label->abstraction_guid,
- to_abstraction_guid(ndns->claim_class,
- &nd_label->abstraction_guid));
- if (namespace_label_has(ndd, checksum)) {
- u64 sum;
-
- nd_label->checksum = __cpu_to_le64(0);
- sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
- nd_label->checksum = __cpu_to_le64(sum);
- }
+ nsl_set_name(ndd, nd_label, nspm->alt_name);
+ nsl_set_flags(ndd, nd_label, flags);
+ nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
+ nsl_set_position(ndd, nd_label, pos);
+ nsl_set_isetcookie(ndd, nd_label, cookie);
+ nsl_set_rawsize(ndd, nd_label, resource_size(res));
+ nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
+ nsl_set_dpa(ndd, nd_label, res->start);
+ nsl_set_slot(ndd, nd_label, slot);
+ nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
+ nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
+ nsl_calculate_checksum(ndd, nd_label);
nd_dbg_dpa(nd_region, ndd, res, "\n");
/* update label */
@@ -879,9 +923,9 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
struct resource *res;
for_each_dpa_resource(ndd, res) {
- if (res->start != __le64_to_cpu(nd_label->dpa))
+ if (res->start != nsl_get_dpa(ndd, nd_label))
continue;
- if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
+ if (resource_size(res) != nsl_get_rawsize(ndd, nd_label))
continue;
return res;
}
@@ -890,6 +934,59 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
}
/*
+ * Use the presence of the type_guid as a flag to determine isetcookie
+ * usage and nlabel + position policy for blk-aperture namespaces.
+ */
+static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie)
+{
+ if (namespace_label_has(ndd, type_guid)) {
+ nsl_set_isetcookie(ndd, nd_label, isetcookie);
+ return;
+ }
+ nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */
+}
+
+bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie)
+{
+ if (!namespace_label_has(ndd, type_guid))
+ return true;
+
+ if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) {
+ dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie,
+ nsl_get_isetcookie(ndd, nd_label));
+ return false;
+ }
+
+ return true;
+}
+
+static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, int nlabel,
+ bool first)
+{
+ if (!namespace_label_has(ndd, type_guid)) {
+ nsl_set_nlabel(ndd, nd_label, 0); /* N/A */
+ return;
+ }
+ nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff);
+}
+
+static void nsl_set_blk_position(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ bool first)
+{
+ if (!namespace_label_has(ndd, type_guid)) {
+ nsl_set_position(ndd, nd_label, 0);
+ return;
+ }
+ nsl_set_position(ndd, nd_label, first ? 0 : 0xffff);
+}
+
+/*
* 1/ Account all the labels that can be freed after this update
* 2/ Allocate and write the label to the staging (next) index
* 3/ Record the resources in the namespace device
@@ -1017,50 +1114,21 @@ static int __blk_label_update(struct nd_region *nd_region,
nd_label = to_label(ndd, slot);
memset(nd_label, 0, sizeof_namespace_label(ndd));
memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
- if (nsblk->alt_name)
- memcpy(nd_label->name, nsblk->alt_name,
- NSLABEL_NAME_LEN);
- nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
-
- /*
- * Use the presence of the type_guid as a flag to
- * determine isetcookie usage and nlabel + position
- * policy for blk-aperture namespaces.
- */
- if (namespace_label_has(ndd, type_guid)) {
- if (i == min_dpa_idx) {
- nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
- nd_label->position = __cpu_to_le16(0);
- } else {
- nd_label->nlabel = __cpu_to_le16(0xffff);
- nd_label->position = __cpu_to_le16(0xffff);
- }
- nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
- } else {
- nd_label->nlabel = __cpu_to_le16(0); /* N/A */
- nd_label->position = __cpu_to_le16(0); /* N/A */
- nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
- }
-
- nd_label->dpa = __cpu_to_le64(res->start);
- nd_label->rawsize = __cpu_to_le64(resource_size(res));
- nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
- nd_label->slot = __cpu_to_le32(slot);
- if (namespace_label_has(ndd, type_guid))
- guid_copy(&nd_label->type_guid, &nd_set->type_guid);
- if (namespace_label_has(ndd, abstraction_guid))
- guid_copy(&nd_label->abstraction_guid,
- to_abstraction_guid(ndns->claim_class,
- &nd_label->abstraction_guid));
-
- if (namespace_label_has(ndd, checksum)) {
- u64 sum;
-
- nd_label->checksum = __cpu_to_le64(0);
- sum = nd_fletcher64(nd_label,
- sizeof_namespace_label(ndd), 1);
- nd_label->checksum = __cpu_to_le64(sum);
- }
+ nsl_set_name(ndd, nd_label, nsblk->alt_name);
+ nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL);
+
+ nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources,
+ i == min_dpa_idx);
+ nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx);
+ nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2);
+
+ nsl_set_dpa(ndd, nd_label, res->start);
+ nsl_set_rawsize(ndd, nd_label, resource_size(res));
+ nsl_set_lbasize(ndd, nd_label, nsblk->lbasize);
+ nsl_set_slot(ndd, nd_label, slot);
+ nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
+ nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
+ nsl_calculate_checksum(ndd, nd_label);
/* update label */
offset = nd_label_offset(ndd, nd_label);
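The two label-update paths above now funnel their checksum generation
through nsl_calculate_checksum(), whose definition is outside this
hunk. A minimal sketch, reconstructed from the open-coded Fletcher-64
block this diff removes:

static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	u64 sum;

	if (!namespace_label_has(ndd, checksum))
		return;
	/* zero the field so it does not contribute to its own sum */
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum);
}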
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 956b6d1bd8cc..31f94fad7b92 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -135,7 +135,6 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd);
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot);
u32 nd_label_nfree(struct nvdimm_drvdata *ndd);
-enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid);
struct nd_region;
struct nd_namespace_pmem;
struct nd_namespace_blk;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 745478213ff2..4cec171c934d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1235,7 +1235,7 @@ static int namespace_update_uuid(struct nd_region *nd_region,
if (!nd_label)
continue;
nd_label_gen_id(&label_id, nd_label->uuid,
- __le32_to_cpu(nd_label->flags));
+ nsl_get_flags(ndd, nd_label));
if (strcmp(old_label_id.id, label_id.id) == 0)
set_bit(ND_LABEL_REAP, &label_ent->flags);
}
@@ -1847,28 +1847,21 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
u16 position, nlabel;
- u64 isetcookie;
if (!nd_label)
continue;
- isetcookie = __le64_to_cpu(nd_label->isetcookie);
- position = __le16_to_cpu(nd_label->position);
- nlabel = __le16_to_cpu(nd_label->nlabel);
+ position = nsl_get_position(ndd, nd_label);
+ nlabel = nsl_get_nlabel(ndd, nd_label);
- if (isetcookie != cookie)
+ if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
continue;
if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
continue;
- if (namespace_label_has(ndd, type_guid)
- && !guid_equal(&nd_set->type_guid,
- &nd_label->type_guid)) {
- dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- &nd_set->type_guid,
- &nd_label->type_guid);
+ if (!nsl_validate_type_guid(ndd, nd_label,
+ &nd_set->type_guid))
continue;
- }
if (found_uuid) {
dev_dbg(ndd->dev, "duplicate entry for uuid\n");
@@ -1923,8 +1916,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
*/
hw_start = nd_mapping->start;
hw_end = hw_start + nd_mapping->size;
- pmem_start = __le64_to_cpu(nd_label->dpa);
- pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
+ pmem_start = nsl_get_dpa(ndd, nd_label);
+ pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
if (pmem_start >= hw_start && pmem_start < hw_end
&& pmem_end <= hw_end && pmem_end > hw_start)
/* pass */;
@@ -1947,14 +1940,16 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
* @nd_label: target pmem namespace label to evaluate
*/
static struct device *create_namespace_pmem(struct nd_region *nd_region,
- struct nd_namespace_index *nsindex,
- struct nd_namespace_label *nd_label)
+ struct nd_mapping *nd_mapping,
+ struct nd_namespace_label *nd_label)
{
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_namespace_index *nsindex =
+ to_namespace_index(ndd, ndd->ns_current);
u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
struct nd_label_ent *label_ent;
struct nd_namespace_pmem *nspm;
- struct nd_mapping *nd_mapping;
resource_size_t size = 0;
struct resource *res;
struct device *dev;
@@ -1966,10 +1961,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
return ERR_PTR(-ENXIO);
}
- if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
+ if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
nd_label->uuid);
- if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
+ if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
return ERR_PTR(-EAGAIN);
dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
@@ -2037,20 +2032,18 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
continue;
}
- size += __le64_to_cpu(label0->rawsize);
- if (__le16_to_cpu(label0->position) != 0)
+ ndd = to_ndd(nd_mapping);
+ size += nsl_get_rawsize(ndd, label0);
+ if (nsl_get_position(ndd, label0) != 0)
continue;
WARN_ON(nspm->alt_name || nspm->uuid);
- nspm->alt_name = kmemdup((void __force *) label0->name,
- NSLABEL_NAME_LEN, GFP_KERNEL);
+ nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
+ NSLABEL_NAME_LEN, GFP_KERNEL);
nspm->uuid = kmemdup((void __force *) label0->uuid,
NSLABEL_UUID_LEN, GFP_KERNEL);
- nspm->lbasize = __le64_to_cpu(label0->lbasize);
- ndd = to_ndd(nd_mapping);
- if (namespace_label_has(ndd, abstraction_guid))
- nspm->nsio.common.claim_class
- = to_nvdimm_cclass(&label0->abstraction_guid);
-
+ nspm->lbasize = nsl_get_lbasize(ndd, label0);
+ nspm->nsio.common.claim_class =
+ nsl_get_claim_class(ndd, label0);
}
if (!nspm->alt_name || !nspm->uuid) {
@@ -2237,7 +2230,7 @@ static int add_namespace_resource(struct nd_region *nd_region,
if (is_namespace_blk(devs[i])) {
res = nsblk_add_resource(nd_region, ndd,
to_nd_namespace_blk(devs[i]),
- __le64_to_cpu(nd_label->dpa));
+ nsl_get_dpa(ndd, nd_label));
if (!res)
return -ENXIO;
nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
@@ -2265,21 +2258,10 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
struct device *dev = NULL;
struct resource *res;
- if (namespace_label_has(ndd, type_guid)) {
- if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
- dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- &nd_set->type_guid,
- &nd_label->type_guid);
- return ERR_PTR(-EAGAIN);
- }
-
- if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
- dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
- nd_set->cookie2,
- __le64_to_cpu(nd_label->isetcookie));
- return ERR_PTR(-EAGAIN);
- }
- }
+ if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
+ return ERR_PTR(-EAGAIN);
+ if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2))
+ return ERR_PTR(-EAGAIN);
nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
if (!nsblk)
@@ -2288,23 +2270,19 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
dev->type = &namespace_blk_device_type;
dev->parent = &nd_region->dev;
nsblk->id = -1;
- nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
- nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
- GFP_KERNEL);
- if (namespace_label_has(ndd, abstraction_guid))
- nsblk->common.claim_class
- = to_nvdimm_cclass(&nd_label->abstraction_guid);
+ nsblk->lbasize = nsl_get_lbasize(ndd, nd_label);
+ nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL);
+ nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label);
if (!nsblk->uuid)
goto blk_err;
- memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+ nsl_get_name(ndd, nd_label, name);
if (name[0]) {
- nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
- GFP_KERNEL);
+ nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL);
if (!nsblk->alt_name)
goto blk_err;
}
res = nsblk_add_resource(nd_region, ndd, nsblk,
- __le64_to_cpu(nd_label->dpa));
+ nsl_get_dpa(ndd, nd_label));
if (!res)
goto blk_err;
nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
@@ -2345,6 +2323,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
struct device *dev, **devs = NULL;
struct nd_label_ent *label_ent, *e;
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
/* "safe" because create_namespace_pmem() might list_move() label_ent */
@@ -2355,7 +2334,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
if (!nd_label)
continue;
- flags = __le32_to_cpu(nd_label->flags);
+ flags = nsl_get_flags(ndd, nd_label);
if (is_nd_blk(&nd_region->dev)
== !!(flags & NSLABEL_FLAG_LOCAL))
/* pass, region matches label type */;
@@ -2363,9 +2342,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
continue;
/* skip labels that describe extents outside of the region */
- if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
- __le64_to_cpu(nd_label->dpa) > map_end)
- continue;
+ if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
+ nsl_get_dpa(ndd, nd_label) > map_end)
+ continue;
i = add_namespace_resource(nd_region, nd_label, devs, count);
if (i < 0)
@@ -2381,13 +2360,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
if (is_nd_blk(&nd_region->dev))
dev = create_namespace_blk(nd_region, nd_label, count);
- else {
- struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- struct nd_namespace_index *nsindex;
-
- nsindex = to_namespace_index(ndd, ndd->ns_current);
- dev = create_namespace_pmem(nd_region, nsindex, nd_label);
- }
+ else
+ dev = create_namespace_pmem(nd_region, nd_mapping,
+ nd_label);
if (IS_ERR(dev)) {
switch (PTR_ERR(dev)) {
@@ -2571,10 +2546,10 @@ static int init_active_labels(struct nd_region *nd_region)
break;
label = nd_label_active(ndd, j);
if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
- u32 flags = __le32_to_cpu(label->flags);
+ u32 flags = nsl_get_flags(ndd, label);
flags &= ~NSLABEL_FLAG_LOCAL;
- label->flags = __cpu_to_le32(flags);
+ nsl_set_flags(ndd, label, flags);
}
label_ent->label = label;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 696b55556d4d..5467ebbb4a6b 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -35,6 +35,156 @@ struct nvdimm_drvdata {
struct kref kref;
};
+static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return nd_label->name;
+}
+
+static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u8 *name)
+{
+ return memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+}
+
+static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u8 *name)
+{
+ if (!name)
+ return NULL;
+ return memcpy(nd_label->name, name, NSLABEL_NAME_LEN);
+}
+
+static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le32_to_cpu(nd_label->slot);
+}
+
+static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u32 slot)
+{
+ nd_label->slot = __cpu_to_le32(slot);
+}
+
+static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->checksum);
+}
+
+static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 checksum)
+{
+ nd_label->checksum = __cpu_to_le64(checksum);
+}
+
+static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le32_to_cpu(nd_label->flags);
+}
+
+static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u32 flags)
+{
+ nd_label->flags = __cpu_to_le32(flags);
+}
+
+static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->dpa);
+}
+
+static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u64 dpa)
+{
+ nd_label->dpa = __cpu_to_le64(dpa);
+}
+
+static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->rawsize);
+}
+
+static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 rawsize)
+{
+ nd_label->rawsize = __cpu_to_le64(rawsize);
+}
+
+static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie)
+{
+ nd_label->isetcookie = __cpu_to_le64(isetcookie);
+}
+
+static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 cookie)
+{
+ return cookie == __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le16_to_cpu(nd_label->position);
+}
+
+static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u16 position)
+{
+ nd_label->position = __cpu_to_le16(position);
+}
+
+static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le16_to_cpu(nd_label->nlabel);
+}
+
+static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u16 nlabel)
+{
+ nd_label->nlabel = __cpu_to_le16(nlabel);
+}
+
+static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->lbasize);
+}
+
+static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 lbasize)
+{
+ nd_label->lbasize = __cpu_to_le64(lbasize);
+}
+
+bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie);
+bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, guid_t *guid);
+enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label);
+
struct nd_region_data {
int ns_count;
int ns_active;
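The accessors above funnel every __le16/__le32/__le64 conversion
through one place, so callers only ever compare fields in CPU byte
order. A hypothetical helper (label_covers() is illustrative, not part
of the tree) shows the intended call pattern:

static bool label_covers(struct nvdimm_drvdata *ndd,
			 struct nd_namespace_label *nd_label,
			 u64 start, u64 size)
{
	u64 dpa = nsl_get_dpa(ndd, nd_label);
	u64 rawsize = nsl_get_rawsize(ndd, nd_label);

	return dpa >= start && dpa + rawsize <= start + size;
}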
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 1e0615b8565e..72de88ff0d30 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -450,11 +450,11 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
} else {
+ addr = devm_memremap(dev, pmem->phys_addr,
+ pmem->size, ARCH_MEMREMAP_PMEM);
if (devm_add_action_or_reset(dev, pmem_release_queue,
&pmem->pgmap))
return -ENOMEM;
- addr = devm_memremap(dev, pmem->phys_addr,
- pmem->size, ARCH_MEMREMAP_PMEM);
bb_range.start = res->start;
bb_range.end = res->end;
}
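The reordering above matters because devres teardown is LIFO: actions
registered later run first. Mapping the memory before registering
pmem_release_queue() means the queue is released while the mapping is
still valid. A standalone toy model of that ordering:

#include <stdio.h>

static void release_mapping(void) { printf("unmap pmem\n"); }
static void release_queue(void)   { printf("release queue\n"); }

int main(void)
{
	/* index 0 registered first, index 1 second */
	void (*registered[2])(void) = { release_mapping, release_queue };
	int i;

	for (i = 1; i >= 0; i--)	/* teardown: reverse order */
		registered[i]();	/* queue first, mapping second */
	return 0;
}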
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8679a108f571..7efb31b87f37 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -116,6 +116,8 @@ static struct class *nvme_ns_chr_class;
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd);
/*
* Prepare a queue for teardown.
@@ -1152,7 +1154,8 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return effects;
}
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
+static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ struct nvme_command *cmd, int status)
{
if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
nvme_unfreeze(ctrl);
@@ -1167,6 +1170,26 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
nvme_queue_scan(ctrl);
flush_work(&ctrl->scan_work);
}
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_set_features:
+ switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
+ case NVME_FEAT_KATO:
+ /*
+ * The keep-alive command interval on the host should be
+ * updated when KATO is modified by a Set Features
+ * command.
+ */
+ if (!status)
+ nvme_update_keep_alive(ctrl, cmd);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
}
int nvme_execute_passthru_rq(struct request *rq)
@@ -1181,7 +1204,7 @@ int nvme_execute_passthru_rq(struct request *rq)
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
ret = nvme_execute_rq(disk, rq, false);
if (effects) /* nothing to be done for zero cmd effects */
- nvme_passthru_end(ctrl, effects);
+ nvme_passthru_end(ctrl, effects, cmd, ret);
return ret;
}
@@ -1269,6 +1292,21 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd)
+{
+ unsigned int new_kato =
+ DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
+
+ dev_info(ctrl->device,
+ "keep alive interval updated from %u ms to %u ms\n",
+ ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
+
+ nvme_stop_keep_alive(ctrl);
+ ctrl->kato = new_kato;
+ nvme_start_keep_alive(ctrl);
+}
+
/*
* In NVMe 1.0 the CNS field was just a binary controller or namespace
* flag, thus sending any new CNS opcodes has a big chance of not working.
@@ -1302,11 +1340,6 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
return error;
}
-static bool nvme_multi_css(struct nvme_ctrl *ctrl)
-{
- return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
-}
-
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
struct nvme_ns_id_desc *cur, bool *csi_seen)
{
@@ -1874,6 +1907,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
goto out_unfreeze;
}
+ set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
@@ -1885,6 +1919,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
+ nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
disk_update_readahead(ns->head->disk);
@@ -3763,7 +3798,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
nvme_get_ctrl(ctrl);
- device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
+ if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
+ goto out_cleanup_ns_from_list;
+
if (!nvme_ns_head_multipath(ns->head))
nvme_add_ns_cdev(ns);
@@ -3773,6 +3810,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
return;
+ out_cleanup_ns_from_list:
+ nvme_put_ctrl(ctrl);
+ down_write(&ctrl->namespaces_rwsem);
+ list_del_init(&ns->list);
+ up_write(&ctrl->namespaces_rwsem);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
@@ -3795,6 +3837,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
+ clear_bit(NVME_NS_READY, &ns->flags);
set_capacity(ns->disk, 0);
nvme_fault_inject_fini(&ns->fault_inject);
@@ -3802,9 +3845,12 @@ static void nvme_ns_remove(struct nvme_ns *ns)
list_del_rcu(&ns->siblings);
mutex_unlock(&ns->ctrl->subsys->lock);
- synchronize_rcu(); /* guarantee not available in head->list */
- nvme_mpath_clear_current_path(ns);
- synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
+ /* guarantee not available in head->list */
+ synchronize_rcu();
+
+ /* wait for concurrent submissions */
+ if (nvme_mpath_clear_current_path(ns))
+ synchronize_srcu(&ns->head->srcu);
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
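For reference, a sketch of the Set Features (KATO) command that the new
nvme_passthru_end() hook reacts to. The field usage mirrors the
decoding above (feature id in cdw10, timeout in milliseconds in cdw11);
build_kato_set_features() itself is illustrative, not a kernel
function:

static void build_kato_set_features(struct nvme_command *cmd,
				    unsigned int kato_secs)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->common.opcode = nvme_admin_set_features;
	cmd->common.cdw10 = cpu_to_le32(NVME_FEAT_KATO);
	/* cdw11 carries milliseconds; the host rounds back to seconds */
	cmd->common.cdw11 = cpu_to_le32(kato_secs * 1000);
}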
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 37ce3e8b1db2..5d7bc58a27bd 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -147,6 +147,21 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
mutex_unlock(&ctrl->scan_lock);
}
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+ sector_t capacity = get_capacity(head->disk);
+ int node;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (capacity != get_capacity(ns->disk))
+ clear_bit(NVME_NS_READY, &ns->flags);
+ }
+
+ for_each_node(node)
+ rcu_assign_pointer(head->current_path[node], NULL);
+}
+
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
/*
@@ -158,7 +173,7 @@ static bool nvme_path_is_disabled(struct nvme_ns *ns)
ns->ctrl->state != NVME_CTRL_DELETING)
return true;
if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
- test_bit(NVME_NS_REMOVING, &ns->flags))
+ !test_bit(NVME_NS_READY, &ns->flags))
return true;
return false;
}
@@ -465,6 +480,8 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
ctrl->subsys->instance, head->instance);
blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
+
/* set to a default value of 512 until the disk is validated */
blk_queue_logical_block_size(head->disk->queue, 512);
blk_set_stacking_limits(&head->disk->queue->limits);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a2e1f298b217..9871c0c9374c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -456,6 +456,7 @@ struct nvme_ns {
#define NVME_NS_DEAD 1
#define NVME_NS_ANA_PENDING 2
#define NVME_NS_FORCE_RO 3
+#define NVME_NS_READY 4
struct cdev cdev;
struct device cdev_device;
@@ -748,6 +749,7 @@ void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
@@ -795,6 +797,9 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
return false;
}
+static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
@@ -887,4 +892,9 @@ struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);
+static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
+{
+ return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
+}
+
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 645025620154..e2ab12f3f51c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -45,6 +45,7 @@ struct nvme_tcp_request {
u32 pdu_len;
u32 pdu_sent;
u16 ttag;
+ __le16 status;
struct list_head entry;
struct llist_node lentry;
__le32 ddgst;
@@ -485,6 +486,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
struct nvme_completion *cqe)
{
+ struct nvme_tcp_request *req;
struct request *rq;
rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
@@ -496,7 +498,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
return -EINVAL;
}
- if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+ req = blk_mq_rq_to_pdu(rq);
+ if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
+ req->status = cqe->status;
+
+ if (!nvme_try_complete_req(rq, req->status, cqe->result))
nvme_complete_rq(rq);
queue->nr_cqe++;
@@ -758,7 +764,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
} else {
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ nvme_tcp_end_request(rq,
+ le16_to_cpu(req->status));
queue->nr_cqe++;
}
nvme_tcp_init_recv_ctx(queue);
@@ -788,18 +795,24 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
return 0;
if (queue->recv_ddgst != queue->exp_ddgst) {
+ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
+
dev_err(queue->ctrl->ctrl.device,
"data digest error: recv %#x expected %#x\n",
le32_to_cpu(queue->recv_ddgst),
le32_to_cpu(queue->exp_ddgst));
- return -EIO;
}
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ nvme_tcp_end_request(rq, le16_to_cpu(req->status));
queue->nr_cqe++;
}
@@ -2293,6 +2306,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
return ret;
req->state = NVME_TCP_SEND_CMD_PDU;
+ req->status = cpu_to_le16(NVME_SC_SUCCESS);
req->offset = 0;
req->data_sent = 0;
req->pdu_len = 0;
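The new req->status field implements a first-error-wins policy: it is
initialized to NVME_SC_SUCCESS per request, and only the first
non-success completion (for example a data digest failure) overwrites
it. A standalone toy model, assuming the generic status value 0x4 for
NVME_SC_DATA_XFER_ERROR:

#include <stdint.h>
#include <stdio.h>

#define NVME_SC_SUCCESS		0x0
#define NVME_SC_DATA_XFER_ERROR	0x4	/* assumed spec value */

static uint16_t req_status = NVME_SC_SUCCESS;

static void on_completion(uint16_t sc)
{
	if (req_status == NVME_SC_SUCCESS)
		req_status = sc;	/* first error sticks */
}

int main(void)
{
	on_completion(NVME_SC_DATA_XFER_ERROR);	/* digest failure */
	on_completion(NVME_SC_SUCCESS);		/* cannot clear it */
	printf("%#x\n", req_status);		/* prints 0x4 */
	return 0;
}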
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 0cb98f2bbc8c..aa6d84d8848e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1015,7 +1015,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
- if (nvmet_req_passthru_ctrl(req))
+ if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_admin_cmd(req);
switch (cmd->common.opcode) {
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 273555127188..d784f3c200b4 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1028,7 +1028,7 @@ nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
}
/* passthru subsystems use the underlying controller's version */
- if (nvmet_passthru_ctrl(subsys))
+ if (nvmet_is_passthru_subsys(subsys))
return -EINVAL;
ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
@@ -1067,7 +1067,8 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%s\n", subsys->serial);
+ return snprintf(page, PAGE_SIZE, "%*s\n",
+ NVMET_SN_MAX_SIZE, subsys->serial);
}
static ssize_t
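The "%*s" conversion above right-justifies the serial in a field of
NVMET_SN_MAX_SIZE characters (and assumes subsys->serial is
NUL-terminated). A standalone illustration of the field-width form:

#include <stdio.h>

int main(void)
{
	char page[64];

	/* width 20: left-padded with spaces, like the sysfs output */
	snprintf(page, sizeof(page), "%*s\n", 20, "S3RIAL");
	fputs(page, stdout);	/* 14 spaces, then "S3RIAL" */
	return 0;
}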
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 66d05eecc2a9..b8425fa34300 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -553,7 +553,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
mutex_lock(&subsys->lock);
ret = 0;
- if (nvmet_passthru_ctrl(subsys)) {
+ if (nvmet_is_passthru_subsys(subsys)) {
pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
goto out_unlock;
}
@@ -869,7 +869,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
- if (nvmet_req_passthru_ctrl(req))
+ if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_io_cmd(req);
ret = nvmet_req_find_ns(req);
@@ -1206,6 +1206,9 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+
+ if (nvmet_is_passthru_subsys(ctrl->subsys))
+ nvmet_passthrough_override_cap(ctrl);
}
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
@@ -1363,8 +1366,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
goto out_put_subsystem;
mutex_init(&ctrl->lock);
- nvmet_init_cap(ctrl);
-
ctrl->port = req->port;
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
@@ -1378,6 +1379,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
+ nvmet_init_cap(ctrl);
WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 06dd3d537f07..7143c7fa7464 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -582,7 +582,7 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
-static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
return subsys->passthru_ctrl;
}
@@ -601,18 +601,19 @@ static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
return 0;
}
-static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
-static inline struct nvme_ctrl *
-nvmet_req_passthru_ctrl(struct nvmet_req *req)
+static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
- return nvmet_passthru_ctrl(nvmet_req_subsys(req));
+ return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 225cd1ffbe45..f0efb3537989 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -20,6 +20,16 @@ MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
*/
static DEFINE_XARRAY(passthru_subsystems);
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
+{
+ /*
+ * Multiple command set support can only be declared if the underlying
+ * controller actually supports it.
+ */
+ if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
+ ctrl->cap &= ~(1ULL << 43);
+}
+
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -218,7 +228,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
- struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct request_queue *q = ctrl->admin_q;
struct nvme_ns *ns = NULL;
struct request *rq = NULL;
@@ -299,7 +309,7 @@ out:
*/
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
- struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct nvme_feat_host_behavior *host;
u16 status = NVME_SC_INTERNAL;
int ret;
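The open-coded 1ULL << 43 in nvmet_passthrough_override_cap() above
clears a bit inside the CAP.CSS field (bits 44:37). On the assumption
that bit 43 is the multiple-I/O-command-sets flag, a standalone
illustration of the mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cap = UINT64_MAX;	/* pretend CAP with all bits set */

	cap &= ~(1ULL << 43);		/* same mask as the hunk above */
	printf("bit 43 = %d\n", (int)((cap >> 43) & 1));	/* 0 */
	return 0;
}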
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 0c0dc2e369c0..3fd74bb34819 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1444,6 +1444,9 @@ static int of_fwnode_add_links(struct fwnode_handle *fwnode)
struct property *p;
struct device_node *con_np = to_of_node(fwnode);
+ if (IS_ENABLED(CONFIG_X86))
+ return 0;
+
if (!con_np)
return -EINVAL;
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 889d7ce282eb..952a92504df6 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -156,15 +156,6 @@ static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
return container_of(hba, struct dino_device, hba);
}
-/* Check if PCI device is behind a Card-mode Dino. */
-static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
-{
- struct dino_device *dino_dev;
-
- dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
- return is_card_dino(&dino_dev->hba.dev->id);
-}
-
/*
* Dino Configuration Space Accessor Functions
*/
@@ -447,6 +438,15 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
#ifdef CONFIG_TULIP
+/* Check if PCI device is behind a Card-mode Dino. */
+static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
+{
+ struct dino_device *dino_dev;
+
+ dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
+ return is_card_dino(&dino_dev->hba.dev->id);
+}
+
static void pci_fixup_tulip(struct pci_dev *dev)
{
if (!pci_dev_is_behind_card_dino(dev))
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 6d7d64939f82..c967ad6e2626 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -376,7 +376,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
- if (!pdev->eetlp_prefix_path)
+ if (!pdev->eetlp_prefix_path && !pdev->pasid_no_tlp)
return -EINVAL;
if (!pasid)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 5e1e3796efa4..326f7d13024f 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -40,6 +40,7 @@ config PCI_FTPCI100
config PCI_IXP4XX
bool "Intel IXP4xx PCI controller"
depends on ARM && OF
+ depends on ARCH_IXP4XX || COMPILE_TEST
default ARCH_IXP4XX
help
Say Y here if you want support for the PCI host controller found
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 35e61048e133..ffb176d288cd 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -27,6 +27,7 @@
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
+#define J7200_LINK_DOWN BIT(10)
#define J721E_PCIE_USER_CMD_STATUS 0x4
#define LINK_TRAINING_ENABLE BIT(0)
@@ -57,6 +58,7 @@ struct j721e_pcie {
struct cdns_pcie *cdns_pcie;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
+ u32 linkdown_irq_regfield;
};
enum j721e_pcie_mode {
@@ -66,7 +68,10 @@ enum j721e_pcie_mode {
struct j721e_pcie_data {
enum j721e_pcie_mode mode;
- bool quirk_retrain_flag;
+ unsigned int quirk_retrain_flag:1;
+ unsigned int quirk_detect_quiet_flag:1;
+ u32 linkdown_irq_regfield;
+ unsigned int byte_access_allowed:1;
};
static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -98,12 +103,12 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
u32 reg;
reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
- if (!(reg & LINK_DOWN))
+ if (!(reg & pcie->linkdown_irq_regfield))
return IRQ_NONE;
dev_err(dev, "LINK DOWN!\n");
- j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
+ j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
return IRQ_HANDLED;
}
@@ -112,7 +117,7 @@ static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
u32 reg;
reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
- reg |= LINK_DOWN;
+ reg |= pcie->linkdown_irq_regfield;
j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
}
@@ -284,10 +289,36 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
static const struct j721e_pcie_data j721e_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
+ .byte_access_allowed = false,
+ .linkdown_irq_regfield = LINK_DOWN,
};
static const struct j721e_pcie_data j721e_pcie_ep_data = {
.mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = LINK_DOWN,
+};
+
+static const struct j721e_pcie_data j7200_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+};
+
+static const struct j721e_pcie_data j7200_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .quirk_detect_quiet_flag = true,
+};
+
+static const struct j721e_pcie_data am64_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+};
+
+static const struct j721e_pcie_data am64_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
};
static const struct of_device_id of_j721e_pcie_match[] = {
@@ -299,6 +330,22 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,j721e-pcie-ep",
.data = &j721e_pcie_ep_data,
},
+ {
+ .compatible = "ti,j7200-pcie-host",
+ .data = &j7200_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j7200-pcie-ep",
+ .data = &j7200_pcie_ep_data,
+ },
+ {
+ .compatible = "ti,am64-pcie-host",
+ .data = &am64_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,am64-pcie-ep",
+ .data = &am64_pcie_ep_data,
+ },
{},
};
@@ -332,6 +379,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
pcie->mode = mode;
+ pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
if (IS_ERR(base))
@@ -391,9 +439,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- bridge->ops = &cdns_ti_pcie_host_ops;
+ if (!data->byte_access_allowed)
+ bridge->ops = &cdns_ti_pcie_host_ops;
rc = pci_host_bridge_priv(bridge);
rc->quirk_retrain_flag = data->quirk_retrain_flag;
+ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
cdns_pcie = &rc->pcie;
cdns_pcie->dev = dev;
@@ -459,6 +509,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_get_sync;
}
+ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
cdns_pcie = &ep->pcie;
cdns_pcie->dev = dev;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 897cdde02bd8..88e05b9c2e5b 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -16,11 +16,37 @@
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
-static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
+{
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
+ u32 first_vf_offset, stride;
+
+ if (vfn == 0)
+ return fn;
+
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+
+ return fn;
+}
+
+static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
+ u32 reg;
+
+ if (vfn > 1) {
+ dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
+ return -EINVAL;
+ } else if (vfn == 1) {
+ reg = cap + PCI_SRIOV_VF_DID;
+ cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
+ return 0;
+ }
cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
@@ -47,7 +73,7 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -92,32 +118,36 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
addr0 = lower_32_bits(bar_phys);
addr1 = upper_32_bits(bar_phys);
+
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+ cdns_pcie_writel(pcie, reg, cfg);
+ }
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
addr1);
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
- }
-
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
- cdns_pcie_writel(pcie, reg, cfg);
-
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
epf->epf_bar[bar] = epf_bar;
return 0;
}
-static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -126,29 +156,32 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
enum pci_barno bar = epf_bar->barno;
u32 reg, cfg, b, ctrl;
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+ cdns_pcie_writel(pcie, reg, cfg);
}
- ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
- cdns_pcie_writel(pcie, reg, cfg);
-
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
epf->epf_bar[bar] = NULL;
}
-static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -161,6 +194,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
return -EINVAL;
}
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
@@ -169,7 +203,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
return 0;
}
-static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -189,13 +223,15 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/*
* Set the Multiple Message Capable bitfield into the Message Control
* register.
@@ -209,13 +245,15 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
return 0;
}
-static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -230,13 +268,15 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return mme;
}
-static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
@@ -247,14 +287,17 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
+ u16 interrupts, enum pci_barno bir,
+ u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
@@ -274,8 +317,8 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
return 0;
}
-static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
- u8 intx, bool is_asserted)
+static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
+ bool is_asserted)
{
struct cdns_pcie *pcie = &ep->pcie;
unsigned long flags;
@@ -317,7 +360,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
writel(0, ep->irq_cpu_addr + offset);
}
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
+static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
{
u16 cmd;
@@ -334,7 +378,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
return 0;
}
-static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
@@ -343,6 +387,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -382,7 +428,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
u32 *msi_addr_offset)
@@ -396,6 +442,8 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
int ret;
int i;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -419,7 +467,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
pci_addr &= GENMASK_ULL(63, 2);
for (i = 0; i < interrupt_num; i++) {
- ret = cdns_pcie_ep_map_addr(epc, fn, addr,
+ ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
pci_addr & ~pci_addr_mask,
entry_size);
if (ret)
@@ -433,7 +481,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
@@ -446,6 +494,12 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
u16 flags;
u8 bir;
+ epf = &ep->epf[fn];
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Check whether the MSI-X feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
if (!(flags & PCI_MSIX_FLAGS_ENABLE))
@@ -456,7 +510,6 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
bir = tbl_offset & PCI_MSIX_TABLE_BIR;
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- epf = &ep->epf[fn];
msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
@@ -478,21 +531,27 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct device *dev = pcie->dev;
switch (type) {
case PCI_EPC_IRQ_LEGACY:
- return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
+ if (vfn > 0) {
+ dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+ return -EINVAL;
+ }
+ return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
case PCI_EPC_IRQ_MSI:
- return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
case PCI_EPC_IRQ_MSIX:
- return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
default:
break;
@@ -523,6 +582,13 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
return 0;
}
+static const struct pci_epc_features cdns_pcie_epc_vf_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = 65536,
+};
+
static const struct pci_epc_features cdns_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
@@ -531,9 +597,12 @@ static const struct pci_epc_features cdns_pcie_epc_features = {
};
static const struct pci_epc_features*
-cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
- return &cdns_pcie_epc_features;
+ if (!vfunc_no)
+ return &cdns_pcie_epc_features;
+
+ return &cdns_pcie_epc_vf_features;
}
static const struct pci_epc_ops cdns_pcie_epc_ops = {
@@ -559,9 +628,11 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct cdns_pcie *pcie = &ep->pcie;
+ struct cdns_pcie_epf *epf;
struct resource *res;
struct pci_epc *epc;
int ret;
+ int i;
pcie->is_rc = false;
@@ -606,6 +677,25 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
if (!ep->epf)
return -ENOMEM;
+ epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
+ sizeof(*epc->max_vfs), GFP_KERNEL);
+ if (!epc->max_vfs)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(np, "max-virtual-functions",
+ epc->max_vfs, epc->max_functions);
+ if (ret == 0) {
+ for (i = 0; i < epc->max_functions; i++) {
+ epf = &ep->epf[i];
+ if (epc->max_vfs[i] == 0)
+ continue;
+ epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
+ sizeof(*ep->epf), GFP_KERNEL);
+ if (!epf->epf)
+ return -ENOMEM;
+ }
+ }
+
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
resource_size(pcie->mem_res), PAGE_SIZE);
if (ret < 0) {
@@ -623,6 +713,10 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
/* Reserve region 0 for IRQs */
set_bit(0, &ep->ob_region_map);
+
+ if (ep->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
+
spin_lock_init(&ep->lock);
return 0;
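cdns_pcie_get_fn_from_vfn() above maps a (fn, vfn) pair onto the flat
function number used by the controller's register layout. A standalone
worked example with assumed SR-IOV capability values (First VF
Offset = 4, VF Stride = 1):

#include <assert.h>

static unsigned int vfn_to_fn(unsigned int fn, unsigned int vfn,
			      unsigned int first_vf_offset,
			      unsigned int stride)
{
	if (vfn == 0)
		return fn;	/* the physical function itself */
	return fn + first_vf_offset + (vfn - 1) * stride;
}

int main(void)
{
	assert(vfn_to_fn(0, 1, 4, 1) == 4);	/* VF1 of PF0 */
	assert(vfn_to_fn(0, 2, 4, 1) == 5);	/* VF2 of PF0 */
	return 0;
}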
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index ae1c55503513..fb96d37a135c 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -498,6 +498,9 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 3c3646502d05..52767f26048f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -7,6 +7,22 @@
#include "pcie-cadence.h"
+void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
+{
+ u32 delay = 0x3;
+ u32 ltssm_control_cap;
+
+ /*
+ * Set the LTSSM Detect Quiet state min. delay to 2ms.
+ */
+ ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
+ ltssm_control_cap = ((ltssm_control_cap &
+ ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
+}
+
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size)
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 30db2d68c17a..262421e5d917 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-epf.h>
#include <linux/phy/phy.h>
/* Parameters for the waiting for link up routine */
@@ -46,10 +47,18 @@
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
/* Endpoint Function f BAR b Configuration Registers */
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
@@ -114,6 +123,7 @@
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
+#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
* Root Port Registers (PCI configuration space for the root port function)
@@ -189,6 +199,14 @@
/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+/* LTSSM Capabilities register */
+#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
+ (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
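+/*
+ * Worked example (illustrative): CDNS_PCIE_DETECT_QUIET_MIN_DELAY(0x3)
+ * evaluates to (0x3 << 1) & GENMASK(2, 1) = 0x6, i.e. both delay bits
+ * set, which is the encoding cdns_pcie_detect_quiet_min_delay_set()
+ * uses to select the 2 ms minimum Detect Quiet delay.
+ */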
+
enum cdns_pcie_rp_bar {
RP_BAR_UNDEFINED = -1,
RP_BAR0,
@@ -295,6 +313,7 @@ struct cdns_pcie {
* @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
* available
* @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
@@ -303,14 +322,17 @@ struct cdns_pcie_rc {
u32 vendor_id;
u32 device_id;
bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
- bool quirk_retrain_flag;
+ unsigned int quirk_retrain_flag:1;
+ unsigned int quirk_detect_quiet_flag:1;
};
/**
* struct cdns_pcie_epf - Structure to hold info about endpoint function
+ * @epf: Info about virtual functions attached to the physical function
* @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
*/
struct cdns_pcie_epf {
+ struct cdns_pcie_epf *epf;
struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
@@ -334,6 +356,7 @@ struct cdns_pcie_epf {
* registers fields (RMW) accessible by both remote RC and EP to
* minimize time between read and write
* @epf: Structure to hold info about endpoint function
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
*/
struct cdns_pcie_ep {
struct cdns_pcie pcie;
@@ -348,6 +371,7 @@ struct cdns_pcie_ep {
/* protect writing to PCI_STATUS while raising legacy interrupts */
spinlock_t lock;
struct cdns_pcie_epf *epf;
+ unsigned int quirk_detect_quiet_flag:1;
};
@@ -508,6 +532,9 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return 0;
}
#endif
+
+void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
+
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 423d35872ce4..76c0a63a3f64 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -214,6 +214,17 @@ config PCIE_ARTPEC6_EP
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
endpoint mode. This uses the DesignWare core.
+config PCIE_ROCKCHIP_DW_HOST
+ bool "Rockchip DesignWare PCIe controller"
+ select PCIE_DW
+ select PCIE_DW_HOST
+ depends on PCI_MSI_IRQ_DOMAIN
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ help
+	  Enables support for the DesignWare-based PCIe controller found in
+	  Rockchip SoCs other than the RK3399.
+
config PCIE_INTEL_GW
bool "Intel Gateway PCIe host controller support"
depends on OF && (X86 || COMPILE_TEST)
@@ -225,6 +236,34 @@ config PCIE_INTEL_GW
The PCIe controller uses the DesignWare core plus Intel-specific
hardware wrappers.
+config PCIE_KEEMBAY
+ bool
+
+config PCIE_KEEMBAY_HOST
+ bool "Intel Keem Bay PCIe controller - Host mode"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in host mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
+config PCIE_KEEMBAY_EP
+ bool "Intel Keem Bay PCIe controller - Endpoint mode"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in endpoint mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
config PCIE_KIRIN
depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Kirin series SoCs PCIe controllers"
@@ -286,6 +325,15 @@ config PCIE_TEGRA194_EP
in order to enable device-specific features PCIE_TEGRA194_EP must be
selected. This uses the DesignWare core.
+config PCIE_VISCONTI_HOST
+ bool "Toshiba Visconti PCIe controllers"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+	  Say Y here if you want PCIe controller support on Toshiba Visconti
+	  SoCs. This driver supports the TMPV7708 SoC.
+
config PCIE_UNIPHIER
bool "Socionext UniPhier PCIe host controllers"
depends on ARCH_UNIPHIER || COMPILE_TEST
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 9e6ce0dc2f53..73244409792c 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -14,13 +14,16 @@ obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
+obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
+obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 047cfbdc1330..fbbb78f6885e 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -204,7 +204,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned long val;
- int pos, irq;
+ int pos;
val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
(index * MSI_REG_CTRL_BLOCK_SIZE));
@@ -213,9 +213,8 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
while (pos != MAX_MSI_IRQS_PER_CTRL) {
- irq = irq_find_mapping(pp->irq_domain,
- (index * MAX_MSI_IRQS_PER_CTRL) + pos);
- generic_handle_irq(irq);
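+		/* Look up and dispatch the Linux IRQ for this hwirq in one step */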
+ generic_handle_domain_irq(pp->irq_domain,
+ (index * MAX_MSI_IRQS_PER_CTRL) + pos);
pos++;
pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
}
@@ -257,7 +256,7 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
struct dw_pcie *pci;
struct pcie_port *pp;
unsigned long reg;
- u32 virq, bit;
+ u32 bit;
chained_irq_enter(chip, desc);
@@ -276,11 +275,8 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
case INTB:
case INTC:
case INTD:
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_find_mapping(dra7xx->irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(dra7xx->irq_domain, bit);
break;
}
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index bde3b2824e89..865258d8c53c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -259,14 +259,12 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
u32 pending;
- int virq;
pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
if (BIT(0) & pending) {
- virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
- generic_handle_irq(virq);
+ dev_dbg(dev, ": irq: irq_offset %d", offset);
+ generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
}
/* EOI the INTx interrupt */
@@ -579,7 +577,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 vector, virq, reg, pos;
+ u32 vector, reg, pos;
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
@@ -600,10 +598,8 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
continue;
vector = offset + (pos << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
- virq);
- generic_handle_irq(virq);
+ dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
+ generic_handle_domain_irq(pp->irq_domain, vector);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 597c282f586c..c91fc1954432 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -384,6 +384,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
const struct artpec_pcie_of_data *data;
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
+ u32 val;
match = of_match_device(artpec6_pcie_of_match, dev);
if (!match)
@@ -432,9 +433,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
break;
- case DW_PCIE_EP_TYPE: {
- u32 val;
-
+ case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
return -ENODEV;
@@ -445,8 +444,6 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
return dw_pcie_ep_init(&pci->ep);
- break;
- }
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 8d028a88b375..998b698f4085 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -125,7 +125,7 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
-static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -202,7 +202,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -217,7 +217,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
ep->epf_bar[bar] = NULL;
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -276,7 +276,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
int ret;
@@ -292,9 +292,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
clear_bit(atu_index, ep->ob_window_map);
}
-static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
- phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -309,7 +308,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
return 0;
}
-static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -333,7 +332,8 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -358,7 +358,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
return 0;
}
-static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -382,8 +382,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -418,7 +418,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
return 0;
}
-static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -450,7 +450,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
}
static const struct pci_epc_features*
-dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -525,14 +525,14 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
msg_addr = ((u64)msg_addr_upper) << 32 |
(msg_addr_lower & ~aligned_offset);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
@@ -593,14 +593,14 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
}
aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data, ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index a608ae1fad57..d1d9b8344ec9 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -55,7 +55,7 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
- int i, pos, irq;
+ int i, pos;
unsigned long val;
u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
@@ -74,10 +74,9 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
pos = 0;
while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
- irq = irq_find_mapping(pp->irq_domain,
- (i * MAX_MSI_IRQS_PER_CTRL) +
- pos);
- generic_handle_irq(irq);
+ generic_handle_domain_irq(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
pos++;
}
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 9b397c807261..8851eb161a0e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -164,7 +164,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
return dw_pcie_ep_init(&pci->ep);
- break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
new file mode 100644
index 000000000000..c9b341e55cbb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Rockchip SoCs.
+ *
+ * Copyright (C) 2021 Rockchip Electronics Co., Ltd.
+ * http://www.rock-chips.com
+ *
+ * Author: Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write
+ * mask for the lower 16 bits.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
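+
+/*
+ * Worked example (illustrative): HIWORD_UPDATE_BIT(0xc) expands to
+ * (0xc << 16) | 0xc = 0x000c000c, which unmasks bits 2 and 3 in the
+ * upper half-word and sets them to 1 in the lower half-word, leaving
+ * all other bits of the register untouched.
+ */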
+
+#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_SMLH_LINKUP BIT(16)
+#define PCIE_RDLH_LINKUP BIT(17)
+#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+#define PCIE_L0S_ENTRY 0x11
+#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_CLIENT_LTSSM_STATUS 0x300
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+
+struct rockchip_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct regulator *vpcie3v3;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
+ u32 reg)
+{
+ return readl_relaxed(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CONTROL);
+}
+
+static int rockchip_pcie_link_up(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
+ (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
+ return 1;
+
+ return 0;
+}
+
+static int rockchip_pcie_start_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Reset device */
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 0);
+
+ rockchip_pcie_enable_ltssm(rockchip);
+
+	/*
+	 * PCIe requires the refclk to be stable for 100µs prior to releasing
+	 * PERST#. See Table 2-4 in section 2.6.2, AC Specifications, of the
+	 * PCI Express Card Electromechanical Specification, Rev. 1.1. However,
+	 * we don't know whether the refclk comes from the RC's PHY or from an
+	 * external oscillator. If it comes from the RC, enabling the LTSSM is
+	 * the right place to release PERST#. We also wait well beyond the bare
+	 * 100µs minimum, since we don't know how long the device needs to come
+	 * out of reset.
+	 */
+ msleep(100);
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
+
+ return 0;
+}
+
+static int rockchip_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+
+ /* LTSSM enable control mode */
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
+ .host_init = rockchip_pcie_host_init,
+};
+
+static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (ret < 0)
+ return ret;
+
+ rockchip->clk_cnt = ret;
+
+ return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+}
+
+static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
+
+ rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->rst_gpio))
+ return PTR_ERR(rockchip->rst_gpio);
+
+ return 0;
+}
+
+static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ rockchip->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(rockchip->phy))
+ return dev_err_probe(dev, PTR_ERR(rockchip->phy),
+ "missing PHY\n");
+
+ ret = phy_init(rockchip->phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(rockchip->phy);
+ if (ret)
+ phy_exit(rockchip->phy);
+
+ return ret;
+}
+
+static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
+{
+ phy_exit(rockchip->phy);
+ phy_power_off(rockchip->phy);
+}
+
+static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+
+ rockchip->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
+ return reset_control_deassert(rockchip->rst);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = rockchip_pcie_link_up,
+ .start_link = rockchip_pcie_start_link,
+};
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip;
+ struct pcie_port *pp;
+ int ret;
+
+ rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
+ if (!rockchip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rockchip);
+
+ rockchip->pci.dev = dev;
+ rockchip->pci.ops = &dw_pcie_ops;
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+
+ ret = rockchip_pcie_resource_get(pdev, rockchip);
+ if (ret)
+ return ret;
+
+	/* DON'T MOVE ME: must be enabled before PHY init */
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3),
+ "failed to get vpcie3v3 regulator\n");
+ rockchip->vpcie3v3 = NULL;
+ } else {
+ ret = regulator_enable(rockchip->vpcie3v3);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie3v3 regulator\n");
+ return ret;
+ }
+ }
+
+ ret = rockchip_pcie_phy_init(rockchip);
+ if (ret)
+ goto disable_regulator;
+
+ ret = rockchip_pcie_reset_control_release(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = rockchip_pcie_clk_init(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = dw_pcie_host_init(pp);
+ if (!ret)
+ return 0;
+
+ clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
+deinit_phy:
+ rockchip_pcie_phy_deinit(rockchip);
+disable_regulator:
+ if (rockchip->vpcie3v3)
+ regulator_disable(rockchip->vpcie3v3);
+
+ return ret;
+}
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3568-pcie", },
+ {},
+};
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-dw-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rockchip_pcie_probe,
+};
+builtin_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
new file mode 100644
index 000000000000..1ac29a6eef22
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Intel Keem Bay
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "pcie-designware.h"
+
+/* PCIE_REGS_APB_SLV Registers */
+#define PCIE_REGS_PCIE_CFG 0x0004
+#define PCIE_DEVICE_TYPE BIT(8)
+#define PCIE_RSTN BIT(0)
+#define PCIE_REGS_PCIE_APP_CNTRL 0x0008
+#define APP_LTSSM_ENABLE BIT(0)
+#define PCIE_REGS_INTERRUPT_ENABLE 0x0028
+#define MSI_CTRL_INT_EN BIT(8)
+#define EDMA_INT_EN GENMASK(7, 0)
+#define PCIE_REGS_INTERRUPT_STATUS 0x002c
+#define MSI_CTRL_INT BIT(8)
+#define PCIE_REGS_PCIE_SII_PM_STATE 0x00b0
+#define SMLH_LINK_UP BIT(19)
+#define RDLH_LINK_UP BIT(8)
+#define PCIE_REGS_PCIE_SII_LINK_UP (SMLH_LINK_UP | RDLH_LINK_UP)
+#define PCIE_REGS_PCIE_PHY_CNTL 0x0164
+#define PHY0_SRAM_BYPASS BIT(8)
+#define PCIE_REGS_PCIE_PHY_STAT 0x0168
+#define PHY0_MPLLA_STATE BIT(1)
+#define PCIE_REGS_LJPLL_STA 0x016c
+#define LJPLL_LOCK BIT(0)
+#define PCIE_REGS_LJPLL_CNTRL_0 0x0170
+#define LJPLL_EN BIT(29)
+#define LJPLL_FOUT_EN GENMASK(24, 21)
+#define PCIE_REGS_LJPLL_CNTRL_2 0x0178
+#define LJPLL_REF_DIV GENMASK(17, 12)
+#define LJPLL_FB_DIV GENMASK(11, 0)
+#define PCIE_REGS_LJPLL_CNTRL_3 0x017c
+#define LJPLL_POST_DIV3A GENMASK(24, 22)
+#define LJPLL_POST_DIV2A GENMASK(18, 16)
+
+#define PERST_DELAY_US 1000
+#define AUX_CLK_RATE_HZ 24000000
+
+struct keembay_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ enum dw_pcie_device_mode mode;
+
+ struct clk *clk_master;
+ struct clk *clk_aux;
+ struct gpio_desc *reset;
+};
+
+struct keembay_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static void keembay_ep_reset_assert(struct keembay_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_ep_reset_deassert(struct keembay_pcie *pcie)
+{
+ /*
+ * Ensure that PERST# is asserted for a minimum of 100ms.
+ *
+	 * For more details, refer to the PCI Express Card Electromechanical
+	 * Specification, Revision 1.1, Table 2-4.
+ */
+ msleep(100);
+
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
+{
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+ if (enable)
+ val |= APP_LTSSM_ENABLE;
+ else
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+}
+
+static int keembay_pcie_link_up(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_SII_PM_STATE);
+
+ return (val & PCIE_REGS_PCIE_SII_LINK_UP) == PCIE_REGS_PCIE_SII_LINK_UP;
+}
+
+static int keembay_pcie_start_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+ int ret;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE)
+ return 0;
+
+ keembay_pcie_ltssm_set(pcie, false);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_PCIE_PHY_STAT,
+ val, val & PHY0_MPLLA_STATE, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(pci->dev, "MPLLA is not locked\n");
+ return ret;
+ }
+
+ keembay_pcie_ltssm_set(pcie, true);
+
+ return 0;
+}
+
+static void keembay_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ keembay_pcie_ltssm_set(pcie, false);
+}
+
+static const struct dw_pcie_ops keembay_pcie_ops = {
+ .link_up = keembay_pcie_link_up,
+ .start_link = keembay_pcie_start_link,
+ .stop_link = keembay_pcie_stop_link,
+};
+
+static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
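+
+/*
+ * Note (illustrative): the cast above lets clk_disable_unprepare() be
+ * registered as a devm action, so every clock acquired through this
+ * helper is automatically disabled and unprepared when the driver is
+ * unbound (or immediately, if registering the action fails).
+ */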
+
+static int keembay_pcie_probe_clocks(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+
+ pcie->clk_master = keembay_pcie_probe_clock(dev, "master", 0);
+ if (IS_ERR(pcie->clk_master))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_master),
+ "Failed to enable master clock");
+
+ pcie->clk_aux = keembay_pcie_probe_clock(dev, "aux", AUX_CLK_RATE_HZ);
+ if (IS_ERR(pcie->clk_aux))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_aux),
+ "Failed to enable auxiliary clock");
+
+ return 0;
+}
+
+/*
+ * Initialize the internal PCIe PLL in Host mode.
+ * See the following sections in Keem Bay data book,
+ * (1) 6.4.6.1 PCIe Subsystem Example Initialization,
+ * (2) 6.8 PCIe Low Jitter PLL for Ref Clk Generation.
+ */
+static int keembay_pcie_pll_init(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+ int ret;
+
+ val = FIELD_PREP(LJPLL_REF_DIV, 0) | FIELD_PREP(LJPLL_FB_DIV, 0x32);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_2);
+
+ val = FIELD_PREP(LJPLL_POST_DIV3A, 0x2) |
+ FIELD_PREP(LJPLL_POST_DIV2A, 0x2);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_3);
+
+ val = FIELD_PREP(LJPLL_EN, 0x1) | FIELD_PREP(LJPLL_FOUT_EN, 0xc);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_0);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_LJPLL_STA,
+ val, val & LJPLL_LOCK, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret)
+ dev_err(pci->dev, "Low jitter PLL is not locked\n");
+
+ return ret;
+}
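+
+/*
+ * Worked example (illustrative): FIELD_PREP(LJPLL_FB_DIV, 0x32) above
+ * places 0x32 (decimal 50) into bits 11:0 of PCIE_REGS_LJPLL_CNTRL_2,
+ * i.e. the feedback divider value for the low jitter PLL.
+ */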
+
+static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 val, mask, status;
+ struct pcie_port *pp;
+
+ /*
+	 * The Keem Bay PCIe controller provides additional IP logic on top
+	 * of the standard DWC IP to clear an MSI IRQ by writing '1' to the
+	 * respective bit of the status register.
+	 *
+	 * A chained IRQ handler is therefore registered to drive this
+	 * additional IP logic.
+ */
+
+ chained_irq_enter(chip, desc);
+
+ pp = &pcie->pci.pp;
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ mask = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ status = val & mask;
+
+ if (status & MSI_CTRL_INT) {
+ dw_handle_msi_irq(pp);
+ writel(status, pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int keembay_pcie_setup_msi_irq(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "pcie");
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, keembay_pcie_msi_irq_handler,
+ pcie);
+
+ return 0;
+}
+
+static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ writel(EDMA_INT_EN, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+}
+
+static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ /* Legacy interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "Legacy IRQ is not supported\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type %d\n", type);
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features keembay_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .align = SZ_16K,
+};
+
+static const struct pci_epc_features *
+keembay_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &keembay_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
+ .ep_init = keembay_pcie_ep_init,
+ .raise_irq = keembay_pcie_ep_raise_irq,
+ .get_features = keembay_pcie_get_features,
+};
+
+static const struct dw_pcie_host_ops keembay_pcie_host_ops = {
+};
+
+static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ u32 val;
+ int ret;
+
+ pp->ops = &keembay_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
+
+ ret = keembay_pcie_setup_msi_irq(pcie);
+ if (ret)
+ return ret;
+
+ pcie->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ ret = keembay_pcie_probe_clocks(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+ val |= PHY0_SRAM_BYPASS;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+
+ writel(PCIE_DEVICE_TYPE, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+
+ ret = keembay_pcie_pll_init(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ writel(val | PCIE_RSTN, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ keembay_ep_reset_deassert(pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ keembay_ep_reset_assert(pcie);
+ dev_err(dev, "Failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ val |= MSI_CTRL_INT_EN;
+ writel(val, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static int keembay_pcie_probe(struct platform_device *pdev)
+{
+ const struct keembay_pcie_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct keembay_pcie *pcie;
+ struct dw_pcie *pci;
+ enum dw_pcie_device_mode mode;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &keembay_pcie_ops;
+
+ pcie->mode = mode;
+
+ pcie->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(pcie->apb_base))
+ return PTR_ERR(pcie->apb_base);
+
+ platform_set_drvdata(pdev, pcie);
+
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_HOST))
+ return -ENODEV;
+
+ return keembay_pcie_add_pcie_port(pcie, pdev);
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_EP))
+ return -ENODEV;
+
+ pci->ep.ops = &keembay_pcie_ep_ops;
+ return dw_pcie_ep_init(&pci->ep);
+ default:
+ dev_err(dev, "Invalid device type %d\n", pcie->mode);
+ return -ENODEV;
+ }
+}
+
+static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct keembay_pcie_of_data keembay_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id keembay_pcie_of_match[] = {
+ {
+ .compatible = "intel,keembay-pcie",
+ .data = &keembay_pcie_rc_of_data,
+ },
+ {
+ .compatible = "intel,keembay-pcie-ep",
+ .data = &keembay_pcie_ep_of_data,
+ },
+ {}
+};
+
+static struct platform_driver keembay_pcie_driver = {
+ .driver = {
+ .name = "keembay-pcie",
+ .of_match_table = keembay_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = keembay_pcie_probe,
+};
+builtin_platform_driver(keembay_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 3ec7b29d5dc7..904976913081 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -497,19 +497,19 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
struct tegra_pcie_dw *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
- u32 val, tmp;
+ u32 status_l0, status_l1, link_status;
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
- if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
pex_ep_event_hot_rst_done(pcie);
- if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
- tmp = appl_readl(pcie, APPL_LINK_STATUS);
- if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ link_status = appl_readl(pcie, APPL_LINK_STATUS);
+ if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
dev_dbg(pcie->dev, "Link is up with Host\n");
dw_pcie_ep_linkup(ep);
}
@@ -518,11 +518,11 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
spurious = 0;
}
- if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
+ if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
- if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
return IRQ_WAKE_THREAD;
spurious = 0;
@@ -530,8 +530,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
if (spurious) {
dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
- val);
- appl_writel(pcie, val, APPL_INTR_STATUS_L0);
+ status_l0);
+ appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
}
return IRQ_HANDLED;
@@ -1493,6 +1493,16 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
return;
}
+	/*
+	 * The PCIe controller exits from L2 only when a reset is applied, so
+	 * the controller doesn't handle interrupts. But in cases where L2
+	 * entry fails, PERST# is asserted, which can trigger a surprise link
+	 * down AER. However, this function is called from suspend_noirq(),
+	 * so the AER interrupt would not be processed. Disable all interrupts
+	 * to avoid such a scenario.
+	 */
+ appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
+
if (tegra_pcie_try_link_l2(pcie)) {
dev_info(pcie->dev, "Link didn't transition to L2 state\n");
/*
@@ -1763,7 +1773,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
- val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
ret = dw_pcie_ep_init_complete(ep);
@@ -1935,13 +1945,6 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
return ret;
}
- name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
- pcie->cid);
- if (!name) {
- dev_err(dev, "Failed to create PCIe EP work thread string\n");
- return -ENOMEM;
- }
-
pm_runtime_enable(dev);
ret = dw_pcie_ep_init(ep);
@@ -2236,6 +2239,11 @@ static int tegra_pcie_dw_resume_early(struct device *dev)
struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
+ if (pcie->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Suspend is not supported in EP mode");
+ return -ENOTSUPP;
+ }
+
if (!pcie->link_state)
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 7e8bad326770..d842fd018129 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -235,7 +235,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long reg;
- u32 val, bit, virq;
+ u32 val, bit;
/* INT for debug */
val = readl(priv->base + PCL_RCV_INT);
@@ -257,10 +257,8 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
val = readl(priv->base + PCL_RCV_INTX);
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(priv->legacy_irq_domain, bit);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
new file mode 100644
index 000000000000..a88eab6829bb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DWC PCIe RC driver for Toshiba Visconti ARM SoC
+ *
+ * Copyright (C) 2021 Toshiba Electronic Device & Storage Corporation
+ * Copyright (C) 2021 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+#include "../../pci.h"
+
+struct visconti_pcie {
+ struct dw_pcie pci;
+ void __iomem *ulreg_base;
+ void __iomem *smu_base;
+ void __iomem *mpu_base;
+ struct clk *refclk;
+ struct clk *coreclk;
+ struct clk *auxclk;
+};
+
+#define PCIE_UL_REG_S_PCIE_MODE 0x00F4
+#define PCIE_UL_REG_S_PCIE_MODE_EP 0x00
+#define PCIE_UL_REG_S_PCIE_MODE_RC 0x04
+
+#define PCIE_UL_REG_S_PERSTN_CTRL 0x00F8
+#define PCIE_UL_IOM_PCIE_PERSTN_I_EN BIT(3)
+#define PCIE_UL_DIRECT_PERSTN_EN BIT(2)
+#define PCIE_UL_PERSTN_OUT BIT(1)
+#define PCIE_UL_DIRECT_PERSTN BIT(0)
+#define PCIE_UL_REG_S_PERSTN_CTRL_INIT (PCIE_UL_IOM_PCIE_PERSTN_I_EN | \
+ PCIE_UL_DIRECT_PERSTN_EN | \
+ PCIE_UL_DIRECT_PERSTN)
+
+#define PCIE_UL_REG_S_PHY_INIT_02 0x0104
+#define PCIE_UL_PHY0_SRAM_EXT_LD_DONE BIT(0)
+
+#define PCIE_UL_REG_S_PHY_INIT_03 0x0108
+#define PCIE_UL_PHY0_SRAM_INIT_DONE BIT(0)
+
+#define PCIE_UL_REG_S_INT_EVENT_MASK1 0x0138
+#define PCIE_UL_CFG_PME_INT BIT(0)
+#define PCIE_UL_CFG_LINK_EQ_REQ_INT BIT(1)
+#define PCIE_UL_EDMA_INT0 BIT(2)
+#define PCIE_UL_EDMA_INT1 BIT(3)
+#define PCIE_UL_EDMA_INT2 BIT(4)
+#define PCIE_UL_EDMA_INT3 BIT(5)
+#define PCIE_UL_S_INT_EVENT_MASK1_ALL (PCIE_UL_CFG_PME_INT | \
+ PCIE_UL_CFG_LINK_EQ_REQ_INT | \
+ PCIE_UL_EDMA_INT0 | \
+ PCIE_UL_EDMA_INT1 | \
+ PCIE_UL_EDMA_INT2 | \
+ PCIE_UL_EDMA_INT3)
+
+#define PCIE_UL_REG_S_SB_MON 0x0198
+#define PCIE_UL_REG_S_SIG_MON 0x019C
+#define PCIE_UL_CORE_RST_N_MON BIT(0)
+
+#define PCIE_UL_REG_V_SII_DBG_00 0x0844
+#define PCIE_UL_REG_V_SII_GEN_CTRL_01 0x0860
+#define PCIE_UL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_00 0x0864
+#define PCIE_UL_SMLH_LINK_UP BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_02 0x0868
+#define PCIE_UL_S_DETECT_ACT 0x01
+#define PCIE_UL_S_L0 0x11
+
+#define PISMU_CKON_PCIE 0x0038
+#define PISMU_CKON_PCIE_AUX_CLK BIT(1)
+#define PISMU_CKON_PCIE_MSTR_ACLK BIT(0)
+
+#define PISMU_RSOFF_PCIE 0x0538
+#define PISMU_RSOFF_PCIE_ULREG_RST_N BIT(1)
+#define PISMU_RSOFF_PCIE_PWR_UP_RST_N BIT(0)
+
+#define PCIE_MPU_REG_MP_EN 0x0
+#define MPU_MP_EN_DISABLE BIT(0)
+
+/* Access registers in PCIe ulreg */
+static void visconti_ulreg_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->ulreg_base + reg);
+}
+
+static u32 visconti_ulreg_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->ulreg_base + reg);
+}
+
+/* Access registers in PCIe smu */
+static void visconti_smu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->smu_base + reg);
+}
+
+/* Access registers in PCIe mpu */
+static void visconti_mpu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->mpu_base + reg);
+}
+
+static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->mpu_base + reg);
+}
+
+static int visconti_pcie_link_up(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
+
+ return !!(val & PCIE_UL_S_L0);
+}
+
+static int visconti_pcie_start_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val;
+ int ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_APP_LTSSM_ENABLE,
+ PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ ret = readl_relaxed_poll_timeout(addr + PCIE_UL_REG_V_PHY_ST_02,
+ val, (val & PCIE_UL_S_L0),
+ 90000, 100000);
+ if (ret)
+ return ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_S_INT_EVENT_MASK1_ALL,
+ PCIE_UL_REG_S_INT_EVENT_MASK1);
+
+ if (dw_pcie_link_up(pci)) {
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val & ~MPU_MP_EN_DISABLE,
+ PCIE_MPU_REG_MP_EN);
+ }
+
+ return 0;
+}
+
+static void visconti_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = visconti_ulreg_readl(pcie, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+ val &= ~PCIE_UL_APP_LTSSM_ENABLE;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val | MPU_MP_EN_DISABLE, PCIE_MPU_REG_MP_EN);
+}
+
+/*
+ * Per this SoC's specification, the CPU bus forwards addresses to the
+ * PCIe bus at an offset of 0x40000000, so 0x40000000 is subtracted from
+ * the CPU bus address. This 0x40000000 also corresponds to io_base taken
+ * from the DT.
+ */
+static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
+{
+ struct pcie_port *pp = &pci->pp;
+
+ return cpu_addr & ~pp->io_base;
+}
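+
+/*
+ * Worked example (illustrative): with io_base = 0x40000000 as described
+ * above, a CPU address of 0x48000000 is translated to PCIe bus address
+ * 0x08000000 by masking off the io_base bits.
+ */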
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = visconti_pcie_cpu_addr_fixup,
+ .link_up = visconti_pcie_link_up,
+ .start_link = visconti_pcie_start_link,
+ .stop_link = visconti_pcie_stop_link,
+};
+
+static int visconti_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr;
+ int err;
+ u32 val;
+
+ visconti_smu_writel(pcie,
+ PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK,
+ PISMU_CKON_PCIE);
+ ndelay(250);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_ULREG_RST_N,
+ PISMU_RSOFF_PCIE);
+ visconti_ulreg_writel(pcie, PCIE_UL_REG_S_PCIE_MODE_RC,
+ PCIE_UL_REG_S_PCIE_MODE);
+
+ val = PCIE_UL_REG_S_PERSTN_CTRL_INIT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ val |= PCIE_UL_PERSTN_OUT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_PWR_UP_RST_N,
+ PISMU_RSOFF_PCIE);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_PHY_INIT_03;
+ err = readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_PHY0_SRAM_INIT_DONE),
+ 100, 1000);
+ if (err)
+ return err;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_PHY0_SRAM_EXT_LD_DONE,
+ PCIE_UL_REG_S_PHY_INIT_02);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_SIG_MON;
+ return readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_CORE_RST_N_MON), 100,
+ 1000);
+}
+
+static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
+ .host_init = visconti_pcie_host_init,
+};
+
+static int visconti_get_resources(struct platform_device *pdev,
+ struct visconti_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+
+ pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg");
+ if (IS_ERR(pcie->ulreg_base))
+ return PTR_ERR(pcie->ulreg_base);
+
+ pcie->smu_base = devm_platform_ioremap_resource_byname(pdev, "smu");
+ if (IS_ERR(pcie->smu_base))
+ return PTR_ERR(pcie->smu_base);
+
+ pcie->mpu_base = devm_platform_ioremap_resource_byname(pdev, "mpu");
+ if (IS_ERR(pcie->mpu_base))
+ return PTR_ERR(pcie->mpu_base);
+
+ pcie->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(pcie->refclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->refclk),
+ "Failed to get ref clock\n");
+
+ pcie->coreclk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->coreclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->coreclk),
+ "Failed to get core clock\n");
+
+ pcie->auxclk = devm_clk_get(dev, "aux");
+ if (IS_ERR(pcie->auxclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->auxclk),
+ "Failed to get aux clock\n");
+
+ return 0;
+}
+
+static int visconti_add_pcie_port(struct visconti_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0) {
+ dev_err(dev, "Interrupt intr is missing");
+ return pp->irq;
+ }
+
+ pp->ops = &visconti_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int visconti_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ ret = visconti_get_resources(pdev, pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return visconti_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id visconti_pcie_match[] = {
+ { .compatible = "toshiba,visconti-pcie" },
+ {},
+};
+
+static struct platform_driver visconti_pcie_driver = {
+ .probe = visconti_pcie_probe,
+ .driver = {
+ .name = "visconti-pcie",
+ .of_match_table = visconti_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(visconti_pcie_driver);
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index c637de3a389b..f3547aa60140 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -92,7 +92,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
u32 msi_data, msi_addr_lo, msi_addr_hi;
u32 intr_status, msi_status;
unsigned long shifted_status;
- u32 bit, virq, val, mask;
+ u32 bit, val, mask;
/*
* The core provides a single interrupt for both INTx/MSI messages.
@@ -114,11 +114,10 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
shifted_status >>= PAB_INTX_START;
do {
for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
- virq = irq_find_mapping(rp->intx_domain,
- bit + 1);
- if (virq)
- generic_handle_irq(virq);
- else
+				int ret;
+
+ ret = generic_handle_domain_irq(rp->intx_domain,
+ bit + 1);
+ if (ret)
dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
bit);
@@ -155,9 +154,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
msi_data, msi_addr_hi, msi_addr_lo);
- virq = irq_find_mapping(msi->dev_domain, msi_data);
- if (virq)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi->dev_domain, msi_data);
msi_status = readl_relaxed(pcie->apb_csr_base +
MSI_STATUS_OFFSET);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index c95ebe808f92..596ebcfcc82d 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -58,6 +58,7 @@
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
+#define PIO_ERR_STATUS BIT(11)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
@@ -118,6 +119,46 @@
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
+/* PCIe window configuration */
+#define OB_WIN_BASE_ADDR 0x4c00
+#define OB_WIN_BLOCK_SIZE 0x20
+#define OB_WIN_COUNT 8
+#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
+ OB_WIN_BLOCK_SIZE * (win) + \
+ (offset))
+#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
+#define OB_WIN_ENABLE BIT(0)
+#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
+#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
+#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
+#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
+#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
+#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
+#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
+#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
+#define OB_WIN_FUNC_NUM_SHIFT 24
+#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
+#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
+#define OB_WIN_BUS_NUM_BITS_SHIFT 20
+#define OB_WIN_MSG_CODE_ENABLE BIT(22)
+#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
+#define OB_WIN_MSG_CODE_SHIFT 14
+#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
+#define OB_WIN_ATTR_ENABLE BIT(11)
+#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
+#define OB_WIN_ATTR_TC_SHIFT 8
+#define OB_WIN_ATTR_RELAXED BIT(7)
+#define OB_WIN_ATTR_NOSNOOP BIT(6)
+#define OB_WIN_ATTR_POISON BIT(5)
+#define OB_WIN_ATTR_IDO BIT(4)
+#define OB_WIN_TYPE_MASK GENMASK(3, 0)
+#define OB_WIN_TYPE_SHIFT 0
+#define OB_WIN_TYPE_MEM 0x0
+#define OB_WIN_TYPE_IO 0x4
+#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
+#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
+#define OB_WIN_TYPE_MSG 0xc
+
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
@@ -166,7 +207,7 @@
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb
-#define PIO_RETRY_CNT 500
+#define PIO_RETRY_CNT 750000 /* 1.5 s */
#define PIO_RETRY_DELAY 2 /* 2 us */
#define LINK_WAIT_MAX_RETRIES 10
@@ -177,11 +218,21 @@
#define MSI_IRQ_NUM 32
+#define CFG_RD_CRS_VAL 0xffff0001
+
struct advk_pcie {
struct platform_device *pdev;
void __iomem *base;
+ struct {
+ phys_addr_t match;
+ phys_addr_t remap;
+ phys_addr_t mask;
+ u32 actions;
+ } wins[OB_WIN_COUNT];
+ u8 wins_count;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
+ raw_spinlock_t irq_lock;
struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
struct irq_chip msi_bottom_irq_chip;
@@ -366,9 +417,39 @@ err:
dev_err(dev, "link never came up\n");
}
+/*
+ * Set up a PCIe address window register, which can be used for memory
+ * mapping.
+ */
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
+ phys_addr_t match, phys_addr_t remap,
+ phys_addr_t mask, u32 actions)
+{
+ advk_writel(pcie, OB_WIN_ENABLE |
+ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
+}
+
+static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
+{
+ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
+}
+
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
+ int i;
/* Enable TX */
reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
@@ -447,15 +528,51 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+ /*
+ * Enable AXI address window location generation:
+ * When enabled, the default outbound window
+ * configuration (Default User Field: 0xD0074CFC)
+ * is used for transparent address translation of
+ * outbound transactions. Thus, PCIe address
+ * windows are not required for transparent memory
+ * access when the default outbound window
+ * configuration is set for memory access.
+ */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
- /* Bypass the address window mapping for PIO */
+ /*
+ * Set memory access in the Default User Field so that
+ * no PCIe address window needs to be configured for
+ * transparent memory access.
+ */
+ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
+
+ /*
+ * Bypass the address window mapping for PIO:
+ * Since a PIO access already carries all required
+ * information over the AXI interface via the PIO
+ * registers, no address window is required.
+ */
reg = advk_readl(pcie, PIO_CTRL);
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);
+ /*
+ * Configure PCIe address windows for non-memory or
+ * non-transparent accesses; by default the controller
+ * uses transparent memory access.
+ */
+ for (i = 0; i < pcie->wins_count; i++)
+ advk_pcie_set_ob_win(pcie, i,
+ pcie->wins[i].match, pcie->wins[i].remap,
+ pcie->wins[i].mask, pcie->wins[i].actions);
+
+ /* Disable remaining PCIe outbound windows */
+ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
+ advk_pcie_disable_ob_win(pcie, i);
+
advk_pcie_train_link(pcie);
/*
@@ -472,7 +589,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
}
-static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -483,14 +600,70 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
status = (reg & PIO_COMPLETION_STATUS_MASK) >>
PIO_COMPLETION_STATUS_SHIFT;
- if (!status)
- return;
-
+ /*
+ * According to the HW spec, the PIO status check sequence is:
+ * 1) Even if COMPLETION_STATUS (bits 9:7) indicates success,
+ * Error Status (bit 11) must still be checked; the operation
+ * succeeded only if that bit indicates no error.
+ * 2) The value Unsupported Request (1) of COMPLETION_STATUS
+ * (bits 9:7) means an error only for a PIO write; a PIO read
+ * is successful with a read value of 0xFFFFFFFF.
+ * 3) The value Completion Retry Status (CRS) of COMPLETION_STATUS
+ * (bits 9:7) means an error only for a PIO write; a PIO read
+ * is successful with a read value of 0xFFFF0001.
+ * 4) The value Completer Abort (CA) of COMPLETION_STATUS
+ * (bits 9:7) means an error for both PIO read and PIO write.
+ * 5) Other values are reported as 'unknown'.
+ */
switch (status) {
+ case PIO_COMPLETION_STATUS_OK:
+ if (reg & PIO_ERR_STATUS) {
+ strcomp_status = "COMP_ERR";
+ break;
+ }
+ /* Get the read result */
+ if (val)
+ *val = advk_readl(pcie, PIO_RD_DATA);
+ /* No error */
+ strcomp_status = NULL;
+ break;
case PIO_COMPLETION_STATUS_UR:
strcomp_status = "UR";
break;
case PIO_COMPLETION_STATUS_CRS:
+ if (allow_crs && val) {
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is enabled:
+ * For a Configuration Read Request that includes both
+ * bytes of the Vendor ID field of a device Function's
+ * Configuration Space Header, the Root Complex must
+ * complete the Request to the host by returning a
+ * read-data value of 0001h for the Vendor ID field and
+ * all '1's for any additional bytes included in the
+ * request.
+ *
+ * So CRS in this case is not an error status.
+ */
+ *val = CFG_RD_CRS_VAL;
+ strcomp_status = NULL;
+ break;
+ }
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is not enabled, the Root Complex
+ * must re-issue the Configuration Request as a new Request.
+ * If CRS Software Visibility is enabled: For a Configuration
+ * Write Request or for any other Configuration Read Request,
+ * the Root Complex must re-issue the Configuration Request as
+ * a new Request.
+ * A Root Complex implementation may choose to limit the number
+ * of Configuration Request/CRS Completion Status loops before
+ * determining that something is wrong with the target of the
+ * Request and taking appropriate action, e.g., complete the
+ * Request to the host as a failed transaction.
+ *
+ * To simplify the implementation, do not re-issue the Configuration
+ * Request; instead, complete the Request as a failed transaction.
+ */
strcomp_status = "CRS";
break;
case PIO_COMPLETION_STATUS_CA:
@@ -501,6 +674,9 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
break;
}
+ if (!strcomp_status)
+ return 0;
+
if (reg & PIO_NON_POSTED_REQ)
str_posted = "Non-posted";
else
@@ -508,6 +684,8 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+
+ return -EFAULT;
}
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
@@ -545,6 +723,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
+ *value |= PCI_EXP_RTCAP_CRSVIS << 16;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -626,6 +805,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
+ int ret;
bridge->conf.vendor =
cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
@@ -649,7 +829,15 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
- return pci_bridge_emul_init(bridge, 0);
+ /* PCIe config space can be initialized after pci_bridge_emul_init() */
+ ret = pci_bridge_emul_init(bridge, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Indicate support for Completion Retry Status */
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+
+ return 0;
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
@@ -701,6 +889,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
struct advk_pcie *pcie = bus->sysdata;
+ bool allow_crs;
u32 reg;
int ret;
@@ -713,7 +902,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return pci_bridge_emul_conf_read(&pcie->bridge, where,
size, val);
+ /*
+ * Completion Retry Status may be returned only when all 4 bytes of
+ * the PCI_VENDOR_ID and PCI_DEVICE_ID registers are read at once and
+ * the CRSSVE flag on the Root Bridge is enabled.
+ */
+ allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
+ PCI_EXP_RTCTL_CRSSVE);
+
if (advk_pcie_pio_is_running(pcie)) {
+ /*
+ * If possible, return Completion Retry Status so that the
+ * caller re-issues the request instead of failing.
+ */
+ if (allow_crs) {
+ *val = CFG_RD_CRS_VAL;
+ return PCIBIOS_SUCCESSFUL;
+ }
*val = 0xffffffff;
return PCIBIOS_SET_FAILED;
}
@@ -741,14 +947,25 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
ret = advk_pcie_wait_pio(pcie);
if (ret < 0) {
+ /*
+ * If possible, return Completion Retry Status so that the
+ * caller re-issues the request instead of failing.
+ */
+ if (allow_crs) {
+ *val = CFG_RD_CRS_VAL;
+ return PCIBIOS_SUCCESSFUL;
+ }
*val = 0xffffffff;
return PCIBIOS_SET_FAILED;
}
- advk_pcie_check_pio_status(pcie);
+ /* Check PIO status and get the read result */
+ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ if (ret < 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
- /* Get the read result */
- *val = advk_readl(pcie, PIO_RD_DATA);
if (size == 1)
*val = (*val >> (8 * (where & 3))) & 0xff;
else if (size == 2)
@@ -812,7 +1029,9 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (ret < 0)
return PCIBIOS_SET_FAILED;
- advk_pcie_check_pio_status(pcie);
+ ret = advk_pcie_check_pio_status(pcie, false, NULL);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
return PCIBIOS_SUCCESSFUL;
}
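
To make the special CRS read value concrete, here is a minimal standalone sketch (illustrative only, not part of the patch): it shows how CFG_RD_CRS_VAL decomposes into the Vendor ID 0x0001 / Device ID 0xffff pattern described in the spec quote above, using the same byte-extraction arithmetic that advk_pcie_rd_conf() applies to 1- and 2-byte reads.

#include <stdint.h>
#include <stdio.h>

#define CFG_RD_CRS_VAL 0xffff0001

/* Mirrors the size-1/size-2 extraction at the end of advk_pcie_rd_conf() */
static uint32_t extract(uint32_t val, int where, int size)
{
	if (size == 1)
		return (val >> (8 * (where & 3))) & 0xff;
	if (size == 2)
		return (val >> (8 * (where & 3))) & 0xffff;
	return val;
}

int main(void)
{
	/* A 4-byte read at PCI_VENDOR_ID that returns CRS: the caller sees
	 * vendor = 0x0001 and device = 0xffff, meaning "retry later". */
	printf("vendor = 0x%04x\n", extract(CFG_RD_CRS_VAL, 0, 2)); /* 0x0001 */
	printf("device = 0x%04x\n", extract(CFG_RD_CRS_VAL, 2, 2)); /* 0xffff */
	return 0;
}
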
@@ -886,22 +1105,28 @@ static void advk_pcie_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void advk_pcie_irq_unmask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static int advk_pcie_irq_map(struct irq_domain *h,
@@ -985,6 +1210,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
struct irq_chip *irq_chip;
int ret = 0;
+ raw_spin_lock_init(&pcie->irq_lock);
+
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
@@ -1049,7 +1276,7 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
u32 isr0_val, isr0_mask, isr0_status;
u32 isr1_val, isr1_mask, isr1_status;
- int i, virq;
+ int i;
isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
@@ -1077,8 +1304,7 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
PCIE_ISR1_REG);
- virq = irq_find_mapping(pcie->irq_domain, i);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(pcie->irq_domain, i);
}
}
@@ -1162,6 +1388,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
int ret, irq;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
@@ -1172,6 +1399,80 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ resource_size_t start = entry->res->start;
+ resource_size_t size = resource_size(entry->res);
+ unsigned long type = resource_type(entry->res);
+ u64 win_size;
+
+ /*
+ * The Aardvark hardware can also map PCIe windows for
+ * config type 0 and type 1 accesses, but the driver
+ * issues configuration transfers only via PIO, which
+ * does not use PCIe window configuration.
+ */
+ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
+ type != IORESOURCE_IO)
+ continue;
+
+ /*
+ * Skip transparent memory resources. The default outbound
+ * access configuration is transparent memory access, so
+ * such resources need no window configuration.
+ */
+ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
+ entry->offset == 0)
+ continue;
+
+ /*
+ * The n-th PCIe window is configured by the tuple (match, remap,
+ * mask); an access to address A uses this window if A equals
+ * match under the given mask. Hence every window size must be a
+ * power of two and every start address must be aligned to the
+ * window size. The minimum size is 64 KiB because the lower 16
+ * bits of the mask must be zero. The remapped address may only
+ * have bits set that are covered by the mask.
+ */
+ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
+ /* Calculate the largest aligned window size */
+ win_size = (1ULL << (fls64(size)-1)) |
+ (start ? (1ULL << __ffs64(start)) : 0);
+ win_size = 1ULL << __ffs64(win_size);
+ if (win_size < 0x10000)
+ break;
+
+ dev_dbg(dev,
+ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
+ pcie->wins_count, (unsigned long long)start,
+ (unsigned long long)start + win_size, type);
+
+ if (type == IORESOURCE_IO) {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
+ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
+ } else {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
+ pcie->wins[pcie->wins_count].match = start;
+ }
+ pcie->wins[pcie->wins_count].remap = start - entry->offset;
+ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
+
+ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
+ break;
+
+ start += win_size;
+ size -= win_size;
+ pcie->wins_count++;
+ }
+
+ if (size > 0) {
+ dev_err(&pcie->pdev->dev,
+ "Invalid PCIe region [0x%llx-0x%llx]\n",
+ (unsigned long long)entry->res->start,
+ (unsigned long long)entry->res->end + 1);
+ return -EINVAL;
+ }
+ }
+
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
@@ -1252,6 +1553,7 @@ static int advk_pcie_remove(struct platform_device *pdev)
{
struct advk_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ int i;
pci_lock_rescan_remove();
pci_stop_root_bus(bridge->bus);
@@ -1261,6 +1563,10 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
+ /* Disable outbound address windows mapping */
+ for (i = 0; i < OB_WIN_COUNT; i++)
+ advk_pcie_disable_ob_win(pcie, i);
+
return 0;
}
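
The window-splitting loop in advk_pcie_probe() is easier to follow with the arithmetic isolated. Below is a minimal userspace sketch of the same computation, under the assumption of a hypothetical region at 0xe8000000 of size 0x03000000; fls64u()/ffs64u() stand in for the kernel's fls64()/__ffs64().

#include <stdint.h>
#include <stdio.h>

static int fls64u(uint64_t x) { int n = 0; while (x) { n++; x >>= 1; } return n; }
static int ffs64u(uint64_t x) { int n = 0; while (!(x & 1)) { n++; x >>= 1; } return n; }

int main(void)
{
	uint64_t start = 0xe8000000, size = 0x03000000; /* hypothetical region */

	while (size > 0) {
		/* Largest power-of-two window that is both <= the remaining
		 * size and aligned to the current start address. */
		uint64_t win_size = (1ULL << (fls64u(size) - 1)) |
				    (start ? (1ULL << ffs64u(start)) : 0);
		win_size = 1ULL << ffs64u(win_size);
		if (win_size < 0x10000)	/* below the 64 KiB minimum */
			break;
		printf("window: match=0x%llx mask=0x%llx (size 0x%llx)\n",
		       (unsigned long long)start,
		       (unsigned long long)~(win_size - 1),
		       (unsigned long long)win_size);
		start += win_size;
		size -= win_size;
	}
	return 0;
}

For this input the region splits into a 0x02000000 window at 0xe8000000 followed by a 0x01000000 window at 0xea000000, exactly the two-entry wins[] array the probe loop would build.
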
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index aefef1986201..88980a44461d 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -314,7 +314,7 @@ static void faraday_pci_irq_handler(struct irq_desc *desc)
for (i = 0; i < 4; i++) {
if ((irq_stat & BIT(i)) == 0)
continue;
- generic_handle_irq(irq_find_mapping(p->irqdomain, i));
+ generic_handle_domain_irq(p->irqdomain, i);
}
chained_irq_exit(irqchip, desc);
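
The ftpci100 hunk above is the simplest instance of a conversion repeated throughout this series: the two-step irq_find_mapping() + generic_handle_irq() lookup becomes a single generic_handle_domain_irq() call. A simplified sketch of its 5.15-era semantics (not the literal kernel implementation, which is essentially handle_irq_desc(irq_resolve_mapping(domain, hwirq))):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int generic_handle_domain_irq_sketch(struct irq_domain *domain,
					    unsigned int hwirq)
{
	struct irq_desc *desc = irq_resolve_mapping(domain, hwirq);

	if (!desc)
		return -EINVAL;		/* no virq mapped for this hwirq */
	generic_handle_irq_desc(desc);	/* run the flow handler */
	return 0;
}

The non-zero return is what lets handlers log unexpected interrupts, as the altera and microchip hunks below do with dev_err_ratelimited().
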
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index a53bd8728d0d..eaec915ffe62 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irqdomain.h>
@@ -64,6 +65,7 @@ enum pci_protocol_version_t {
PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3), /* Vibranium */
+ PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4), /* WS2022 */
};
#define CPU_AFFINITY_ALL -1ULL
@@ -73,6 +75,7 @@ enum pci_protocol_version_t {
* first.
*/
static enum pci_protocol_version_t pci_protocol_versions[] = {
+ PCI_PROTOCOL_VERSION_1_4,
PCI_PROTOCOL_VERSION_1_3,
PCI_PROTOCOL_VERSION_1_2,
PCI_PROTOCOL_VERSION_1_1,
@@ -122,6 +125,8 @@ enum pci_message_type {
PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19,
+ PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A,
+ PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B,
PCI_MESSAGE_MAXIMUM
};
@@ -235,6 +240,21 @@ struct hv_msi_desc2 {
u16 processor_array[32];
} __packed;
+/*
+ * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
+ * Everything is the same as in 'hv_msi_desc2' except that the size of the
+ * 'vector' field is larger to support bigger vector values, for example
+ * LPI vectors on ARM.
+ */
+struct hv_msi_desc3 {
+ u32 vector;
+ u8 delivery_mode;
+ u8 reserved;
+ u16 vector_count;
+ u16 processor_count;
+ u16 processor_array[32];
+} __packed;
+
/**
* struct tran_int_desc
* @reserved: unused, padding
@@ -383,6 +403,12 @@ struct pci_create_interrupt2 {
struct hv_msi_desc2 int_desc;
} __packed;
+struct pci_create_interrupt3 {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct hv_msi_desc3 int_desc;
+} __packed;
+
struct pci_delete_interrupt {
struct pci_message message_type;
union win_slot_encoding wslot;
@@ -448,7 +474,13 @@ enum hv_pcibus_state {
};
struct hv_pcibus_device {
+#ifdef CONFIG_X86
struct pci_sysdata sysdata;
+#elif defined(CONFIG_ARM64)
+ struct pci_config_window sysdata;
+#endif
+ struct pci_host_bridge *bridge;
+ struct fwnode_handle *fwnode;
/* Protocol version negotiated with the host */
enum pci_protocol_version_t protocol_version;
enum hv_pcibus_state state;
@@ -464,8 +496,6 @@ struct hv_pcibus_device {
spinlock_t device_list_lock; /* Protect lists below */
void __iomem *cfg_addr;
- struct list_head resources_for_children;
-
struct list_head children;
struct list_head dr_list;
@@ -1328,6 +1358,15 @@ static u32 hv_compose_msi_req_v1(
return sizeof(*int_pkt);
}
+/*
+ * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+ * by subsequent retarget in hv_irq_unmask().
+ */
+static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
+{
+ return cpumask_first_and(affinity, cpu_online_mask);
+}
+
static u32 hv_compose_msi_req_v2(
struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
u32 slot, u8 vector)
@@ -1339,12 +1378,27 @@ static u32 hv_compose_msi_req_v2(
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ cpu = hv_compose_msi_req_get_cpu(affinity);
+ int_pkt->int_desc.processor_array[0] =
+ hv_cpu_number_to_vp_number(cpu);
+ int_pkt->int_desc.processor_count = 1;
- /*
- * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
- * by subsequent retarget in hv_irq_unmask().
- */
- cpu = cpumask_first_and(affinity, cpu_online_mask);
+ return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v3(
+ struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
+ u32 slot, u32 vector)
+{
+ int cpu;
+
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
+ int_pkt->wslot.slot = slot;
+ int_pkt->int_desc.vector = vector;
+ int_pkt->int_desc.reserved = 0;
+ int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
int_pkt->int_desc.processor_count = 1;
@@ -1379,6 +1433,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
union {
struct pci_create_interrupt v1;
struct pci_create_interrupt2 v2;
+ struct pci_create_interrupt3 v3;
} int_pkts;
} __packed ctxt;
@@ -1426,6 +1481,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
cfg->vector);
break;
+ case PCI_PROTOCOL_VERSION_1_4:
+ size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
+ dest,
+ hpdev->desc.win_slot.slot,
+ cfg->vector);
+ break;
+
default:
/* As we only negotiate protocol versions known to this driver,
* this path should never hit. However, it is not a hot
@@ -1566,7 +1628,7 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
hbus->msi_info.handler = handle_edge_irq;
hbus->msi_info.handler_name = "edge";
hbus->msi_info.data = hbus;
- hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
+ hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
&hbus->msi_info,
x86_vector_domain);
if (!hbus->irq_domain) {
@@ -1575,6 +1637,8 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
return -ENODEV;
}
+ dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
+
return 0;
}
@@ -1797,7 +1861,7 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
- hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
+ hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
name, NULL);
if (IS_ERR(hpdev->pci_slot)) {
pr_warn("pci_create slot %s failed\n", name);
@@ -1827,7 +1891,7 @@ static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
{
struct pci_dev *dev;
- struct pci_bus *bus = hbus->pci_bus;
+ struct pci_bus *bus = hbus->bridge->bus;
struct hv_pci_dev *hv_dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -1850,21 +1914,22 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
*/
static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
{
- /* Register the device */
- hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
- 0, /* bus number is always zero */
- &hv_pcifront_ops,
- &hbus->sysdata,
- &hbus->resources_for_children);
- if (!hbus->pci_bus)
- return -ENODEV;
+ int error;
+ struct pci_host_bridge *bridge = hbus->bridge;
+
+ bridge->dev.parent = &hbus->hdev->device;
+ bridge->sysdata = &hbus->sysdata;
+ bridge->ops = &hv_pcifront_ops;
+
+ error = pci_scan_root_bus_bridge(bridge);
+ if (error)
+ return error;
pci_lock_rescan_remove();
- pci_scan_child_bus(hbus->pci_bus);
hv_pci_assign_numa_node(hbus);
- pci_bus_assign_resources(hbus->pci_bus);
+ pci_bus_assign_resources(bridge->bus);
hv_pci_assign_slots(hbus);
- pci_bus_add_devices(hbus->pci_bus);
+ pci_bus_add_devices(bridge->bus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
return 0;
@@ -2127,7 +2192,7 @@ static void pci_devices_present_work(struct work_struct *work)
* because there may have been changes.
*/
pci_lock_rescan_remove();
- pci_scan_child_bus(hbus->pci_bus);
+ pci_scan_child_bus(hbus->bridge->bus);
hv_pci_assign_numa_node(hbus);
hv_pci_assign_slots(hbus);
pci_unlock_rescan_remove();
@@ -2295,11 +2360,11 @@ static void hv_eject_device_work(struct work_struct *work)
/*
* Ejection can come before or after the PCI bus has been set up, so
* attempt to find it and tear down the bus state, if it exists. This
- * must be done without constructs like pci_domain_nr(hbus->pci_bus)
- * because hbus->pci_bus may not exist yet.
+ * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
+ * because hbus->bridge->bus may not exist yet.
*/
wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
- pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
+ pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
if (pdev) {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
@@ -2662,8 +2727,7 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
/* Modify this resource to become a bridge window. */
hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
- pci_add_resource(&hbus->resources_for_children,
- hbus->low_mmio_res);
+ pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
}
if (hbus->high_mmio_space) {
@@ -2682,8 +2746,7 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
/* Modify this resource to become a bridge window. */
hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
- pci_add_resource(&hbus->resources_for_children,
- hbus->high_mmio_res);
+ pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
}
return 0;
@@ -3002,6 +3065,7 @@ static void hv_put_dom_num(u16 dom)
static int hv_pci_probe(struct hv_device *hdev,
const struct hv_vmbus_device_id *dev_id)
{
+ struct pci_host_bridge *bridge;
struct hv_pcibus_device *hbus;
u16 dom_req, dom;
char *name;
@@ -3014,6 +3078,10 @@ static int hv_pci_probe(struct hv_device *hdev,
*/
BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
+ bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
+ if (!bridge)
+ return -ENOMEM;
+
/*
* With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
* alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
@@ -3035,6 +3103,8 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hbus)
return -ENOMEM;
+
+ hbus->bridge = bridge;
hbus->state = hv_pcibus_init;
hbus->wslot_res_allocated = -1;
@@ -3066,17 +3136,19 @@ static int hv_pci_probe(struct hv_device *hdev,
"PCI dom# 0x%hx has collision, using 0x%hx",
dom_req, dom);
+ hbus->bridge->domain_nr = dom;
+#ifdef CONFIG_X86
hbus->sysdata.domain = dom;
+#endif
hbus->hdev = hdev;
INIT_LIST_HEAD(&hbus->children);
INIT_LIST_HEAD(&hbus->dr_list);
- INIT_LIST_HEAD(&hbus->resources_for_children);
spin_lock_init(&hbus->config_lock);
spin_lock_init(&hbus->device_list_lock);
spin_lock_init(&hbus->retarget_msi_interrupt_lock);
hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
- hbus->sysdata.domain);
+ hbus->bridge->domain_nr);
if (!hbus->wq) {
ret = -ENOMEM;
goto free_dom;
@@ -3113,9 +3185,9 @@ static int hv_pci_probe(struct hv_device *hdev,
goto unmap;
}
- hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name);
+ hbus->fwnode = irq_domain_alloc_named_fwnode(name);
kfree(name);
- if (!hbus->sysdata.fwnode) {
+ if (!hbus->fwnode) {
ret = -ENOMEM;
goto unmap;
}
@@ -3193,7 +3265,7 @@ exit_d0:
free_irq_domain:
irq_domain_remove(hbus->irq_domain);
free_fwnode:
- irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ irq_domain_free_fwnode(hbus->fwnode);
unmap:
iounmap(hbus->cfg_addr);
free_config:
@@ -3203,7 +3275,7 @@ close:
destroy_wq:
destroy_workqueue(hbus->wq);
free_dom:
- hv_put_dom_num(hbus->sysdata.domain);
+ hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
kfree(hbus);
return ret;
@@ -3295,9 +3367,9 @@ static int hv_pci_remove(struct hv_device *hdev)
/* Remove the bus from PCI's point of view. */
pci_lock_rescan_remove();
- pci_stop_root_bus(hbus->pci_bus);
+ pci_stop_root_bus(hbus->bridge->bus);
hv_pci_remove_slots(hbus);
- pci_remove_root_bus(hbus->pci_bus);
+ pci_remove_root_bus(hbus->bridge->bus);
pci_unlock_rescan_remove();
}
@@ -3307,12 +3379,11 @@ static int hv_pci_remove(struct hv_device *hdev)
iounmap(hbus->cfg_addr);
hv_free_config_window(hbus);
- pci_free_resource_list(&hbus->resources_for_children);
hv_pci_free_bridge_windows(hbus);
irq_domain_remove(hbus->irq_domain);
- irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ irq_domain_free_fwnode(hbus->fwnode);
- hv_put_dom_num(hbus->sysdata.domain);
+ hv_put_dom_num(hbus->bridge->domain_nr);
kfree(hbus);
return ret;
@@ -3390,7 +3461,7 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
*/
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
- pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
+ pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}
static int hv_pci_resume(struct hv_device *hdev)
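
The ordering of pci_protocol_versions[] matters because negotiation walks the table from the top, so placing PCI_PROTOCOL_VERSION_1_4 first makes new hosts select it while older hosts fall back to 1.3/1.2/1.1. A hedged sketch of the selection loop, simplified from hv_pci_protocol_negotiation(); try_version() is a hypothetical helper standing in for the VMBus round trip:

static int negotiate_sketch(struct hv_device *hdev,
			    struct hv_pcibus_device *hbus)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
		/* try_version() is hypothetical: it stands in for sending
		 * PCI_QUERY_PROTOCOL_VERSION over VMBus and checking the
		 * host's reply status. */
		if (try_version(hdev, pci_protocol_versions[i]) == 0) {
			hbus->protocol_version = pci_protocol_versions[i];
			return 0;	/* newest common version wins */
		}
	}
	return -EPROTO;			/* no mutually supported version */
}
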
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index c979229a6d0d..cb0aa65d6934 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -372,11 +372,6 @@ struct tegra_pcie_port {
struct gpio_desc *reset_gpio;
};
-struct tegra_pcie_bus {
- struct list_head list;
- unsigned int nr;
-};
-
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
unsigned long offset)
{
@@ -764,7 +759,7 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
- const char *err_msg[] = {
+ static const char * const err_msg[] = {
"Unknown",
"AXI slave error",
"AXI decode error",
@@ -1553,12 +1548,10 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
while (reg) {
unsigned int offset = find_first_bit(&reg, 32);
unsigned int index = i * 32 + offset;
- unsigned int irq;
+ int ret;
- irq = irq_find_mapping(msi->domain->parent, index);
- if (irq) {
- generic_handle_irq(irq);
- } else {
+ ret = generic_handle_domain_irq(msi->domain->parent, index);
+ if (ret) {
/*
* That's weird; who triggered this?
* Just clear it.
@@ -2193,13 +2186,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base))
- return PTR_ERR(rp->base);
+ if (IS_ERR(rp->base)) {
+ err = PTR_ERR(rp->base);
+ goto err_node_put;
+ }
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
if (!label) {
- dev_err(dev, "failed to create reset GPIO label\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_node_put;
}
/*
@@ -2217,7 +2212,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
} else {
dev_err(dev, "failed to get reset GPIO: %ld\n",
PTR_ERR(rp->reset_gpio));
- return PTR_ERR(rp->reset_gpio);
+ err = PTR_ERR(rp->reset_gpio);
+ goto err_node_put;
}
}
@@ -2548,7 +2544,7 @@ static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
if (list_empty(&pcie->ports))
return NULL;
- seq_printf(s, "Index Status\n");
+ seq_puts(s, "Index Status\n");
return seq_list_start(&pcie->ports, *pos);
}
@@ -2585,16 +2581,16 @@ static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
seq_printf(s, "%2u ", port->index);
if (up)
- seq_printf(s, "up");
+ seq_puts(s, "up");
if (active) {
if (up)
- seq_printf(s, ", ");
+ seq_puts(s, ", ");
- seq_printf(s, "active");
+ seq_puts(s, "active");
}
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
return 0;
}
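
The err_msg change above is small but worth spelling out: with both const qualifiers the pointer array and the strings are read-only, and `static` keeps the table from being rebuilt on every ISR invocation. A standalone illustration:

/* Illustrative only; mirrors the qualifier change in tegra_pcie_isr() */
static const char * const err_msg_example[] = {
	"Unknown",
	"AXI slave error",
};
/* err_msg_example[0] = "other";  -- would not compile: the array is const */
/* err_msg_example[0][0] = 'x';   -- would not compile: the strings are const */
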
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 1c34c897a7e2..b7a8e062fcc5 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -291,8 +291,7 @@ static void xgene_msi_isr(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct xgene_msi_group *msi_groups;
struct xgene_msi *xgene_msi;
- unsigned int virq;
- int msir_index, msir_val, hw_irq;
+ int msir_index, msir_val, hw_irq, ret;
u32 intr_index, grp_select, msi_grp;
chained_irq_enter(chip, desc);
@@ -330,10 +329,8 @@ static void xgene_msi_isr(struct irq_desc *desc)
* CPU0
*/
hw_irq = hwirq_to_canonical_hwirq(hw_irq);
- virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
- WARN_ON(!virq);
- if (virq != 0)
- generic_handle_irq(virq);
+ ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq);
+ WARN_ON_ONCE(ret);
msir_val &= ~(1 << intr_index);
}
grp_select &= ~(1 << msir_index);
@@ -451,7 +448,6 @@ static int xgene_msi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xgene_msi->msi_regs)) {
- dev_err(&pdev->dev, "no reg space\n");
rc = PTR_ERR(xgene_msi->msi_regs);
goto error;
}
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 98aa1dccc6e6..7b1d3ebc34ec 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -55,7 +55,7 @@ static void altera_msi_isr(struct irq_desc *desc)
struct altera_msi *msi;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
chained_irq_enter(chip, desc);
msi = irq_desc_get_handler_data(desc);
@@ -65,11 +65,9 @@ static void altera_msi_isr(struct irq_desc *desc)
/* Dummy read from vector to clear the interrupt */
readl_relaxed(msi->vector_base + (bit * sizeof(u32)));
- virq = irq_find_mapping(msi->inner_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
- dev_err(&msi->pdev->dev, "unexpected MSI\n");
+ ret = generic_handle_domain_irq(msi->inner_domain, bit);
+ if (ret)
+ dev_err_ratelimited(&msi->pdev->dev, "unexpected MSI\n");
}
}
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 523bd928b380..2513e9363236 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -646,7 +646,7 @@ static void altera_pcie_isr(struct irq_desc *desc)
struct device *dev;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
@@ -658,11 +658,9 @@ static void altera_pcie_isr(struct irq_desc *desc)
/* clear interrupts */
cra_writel(pcie, 1 << bit, P2A_INT_STATUS);
- virq = irq_find_mapping(pcie->irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
- dev_err(dev, "unexpected IRQ, INT%d\n", bit);
+ ret = generic_handle_domain_irq(pcie->irq_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
}
}
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 08bc788d9422..cc30215f5a43 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -476,7 +476,7 @@ static struct msi_domain_info brcm_msi_domain_info = {
static void brcm_pcie_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned long status, virq;
+ unsigned long status;
struct brcm_msi *msi;
struct device *dev;
u32 bit;
@@ -489,10 +489,9 @@ static void brcm_pcie_msi_isr(struct irq_desc *desc)
status >>= msi->legacy_shift;
for_each_set_bit(bit, &status, msi->nr) {
- virq = irq_find_mapping(msi->inner_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
+ int ret;
+ ret = generic_handle_domain_irq(msi->inner_domain, bit);
+ if (ret)
dev_dbg(dev, "unexpected MSI\n");
}
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
index 56b8ee7bf330..f918c713afb0 100644
--- a/drivers/pci/controller/pcie-iproc-bcma.c
+++ b/drivers/pci/controller/pcie-iproc-bcma.c
@@ -35,7 +35,6 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
- LIST_HEAD(resources);
struct pci_host_bridge *bridge;
int ret;
@@ -60,19 +59,16 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
pcie->mem.name = "PCIe MEM space";
pcie->mem.flags = IORESOURCE_MEM;
- pci_add_resource(&resources, &pcie->mem);
+ pci_add_resource(&bridge->windows, &pcie->mem);
+ ret = devm_request_pci_bus_resources(dev, &bridge->windows);
+ if (ret)
+ return ret;
pcie->map_irq = iproc_pcie_bcma_map_irq;
- ret = iproc_pcie_setup(pcie, &resources);
- if (ret) {
- dev_err(dev, "PCIe controller setup failed\n");
- pci_free_resource_list(&resources);
- return ret;
- }
-
bcma_set_drvdata(bdev, pcie);
- return 0;
+
+ return iproc_pcie_setup(pcie, &bridge->windows);
}
static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
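
The iproc-bcma rework drops the local resource list in favor of the host bridge's own window list plus a managed request helper, which removes the manual pci_free_resource_list() error path. A hedged sketch of the resulting pattern; 'bridge', 'pcie' and 'dev' follow the names in iproc_pcie_bcma_probe() above:

#include <linux/pci.h>

/* Fragment sketch, assumed to run inside the probe function */
pci_add_resource(&bridge->windows, &pcie->mem); /* queue the MEM window */
ret = devm_request_pci_bus_resources(dev, &bridge->windows);
if (ret)
	return ret;	/* devm unwinds the requested resources on failure */
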
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 35a82124a126..757b7fbcdc59 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -326,7 +326,6 @@ static void iproc_msi_handler(struct irq_desc *desc)
struct iproc_msi *msi;
u32 eq, head, tail, nr_events;
unsigned long hwirq;
- int virq;
chained_irq_enter(chip, desc);
@@ -362,8 +361,7 @@ static void iproc_msi_handler(struct irq_desc *desc)
/* process all outstanding events */
while (nr_events--) {
hwirq = decode_msi_hwirq(msi, eq, head);
- virq = irq_find_mapping(msi->inner_domain, hwirq);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi->inner_domain, hwirq);
head++;
head %= EQ_LEN;
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index f3aeb8d4eaca..17c59b0d6978 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -645,7 +645,6 @@ static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
{
struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
unsigned long msi_enable, msi_status;
- unsigned int virq;
irq_hw_number_t bit, hwirq;
msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
@@ -659,8 +658,7 @@ static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
- virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(port->msi_bottom_domain, hwirq);
}
} while (true);
}
@@ -670,18 +668,15 @@ static void mtk_pcie_irq_handler(struct irq_desc *desc)
struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
- unsigned int virq;
irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
chained_irq_enter(irqchip, desc);
status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
- PCIE_INTX_SHIFT) {
- virq = irq_find_mapping(port->intx_domain,
- irq_bit - PCIE_INTX_SHIFT);
- generic_handle_irq(virq);
- }
+ PCIE_INTX_SHIFT)
+ generic_handle_domain_irq(port->intx_domain,
+ irq_bit - PCIE_INTX_SHIFT);
irq_bit = PCIE_MSI_SHIFT;
for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
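
The mediatek-gen3 handler above relies on for_each_set_bit_from() starting at a caller-chosen bit (first PCIE_INTX_SHIFT, then PCIE_MSI_SHIFT), so a single status word can carry INTx and MSI bits in disjoint ranges. A standalone userspace illustration of the idiom, with hypothetical shift values:

#include <stdio.h>

int main(void)
{
	unsigned long status = 0x00f2;	/* hypothetical INT_STATUS value */
	unsigned int intx_shift = 4, num_intx = 4;

	/* Equivalent of for_each_set_bit_from(bit, &status, intx_shift + num_intx)
	 * with bit pre-set to intx_shift: bits below the shift are ignored. */
	for (unsigned int bit = intx_shift; bit < intx_shift + num_intx; bit++)
		if (status & (1UL << bit))
			printf("INTx hwirq %u asserted\n", bit - intx_shift);
	return 0;
}
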
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 25bee693834f..2f3f974977a3 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -14,6 +14,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -23,6 +24,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "../pci.h"
@@ -207,6 +209,7 @@ struct mtk_pcie_port {
* struct mtk_pcie - PCIe host information
* @dev: pointer to PCIe device
* @base: IO mapped register base
+ * @cfg: IO mapped register map for PCIe config
* @free_ck: free-run reference clock
* @mem: non-prefetchable memory resource
* @ports: pointer to PCIe port information
@@ -215,6 +218,7 @@ struct mtk_pcie_port {
struct mtk_pcie {
struct device *dev;
void __iomem *base;
+ struct regmap *cfg;
struct clk *free_ck;
struct list_head ports;
@@ -602,7 +606,6 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
- u32 virq;
u32 bit = INTX_SHIFT;
chained_irq_enter(irqchip, desc);
@@ -612,9 +615,8 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
/* Clear the INTx */
writel(1 << bit, port->base + PCIE_INT_STATUS);
- virq = irq_find_mapping(port->irq_domain,
- bit - INTX_SHIFT);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(port->irq_domain,
+ bit - INTX_SHIFT);
}
}
@@ -623,10 +625,8 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
unsigned long imsi_status;
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
- for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
- virq = irq_find_mapping(port->inner_domain, bit);
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
+ generic_handle_domain_irq(port->inner_domain, bit);
}
/* Clear MSI interrupt status */
writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
@@ -650,7 +650,11 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
return err;
}
- port->irq = platform_get_irq(pdev, port->slot);
+ if (of_find_property(dev->of_node, "interrupt-names", NULL))
+ port->irq = platform_get_irq_byname(pdev, "pcie_irq");
+ else
+ port->irq = platform_get_irq(pdev, port->slot);
+
if (port->irq < 0)
return port->irq;
@@ -682,6 +686,10 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
val |= PCIE_CSR_LTSSM_EN(port->slot) |
PCIE_CSR_ASPM_L1_EN(port->slot);
writel(val, pcie->base + PCIE_SYS_CFG_V2);
+ } else if (pcie->cfg) {
+ val = PCIE_CSR_LTSSM_EN(port->slot) |
+ PCIE_CSR_ASPM_L1_EN(port->slot);
+ regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val);
}
/* Assert all reset signals */
@@ -985,6 +993,7 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
+ struct device_node *cfg_node;
int err;
/* get shared registers, which are optional */
@@ -995,6 +1004,14 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
return PTR_ERR(pcie->base);
}
+ cfg_node = of_find_compatible_node(NULL, NULL,
+ "mediatek,generic-pciecfg");
+ if (cfg_node) {
+ pcie->cfg = syscon_node_to_regmap(cfg_node);
+ if (IS_ERR(pcie->cfg))
+ return PTR_ERR(pcie->cfg);
+ }
+
pcie->free_ck = devm_clk_get(dev, "free_ck");
if (IS_ERR(pcie->free_ck)) {
if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
@@ -1027,22 +1044,27 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node, *child;
struct mtk_pcie_port *port, *tmp;
- int err;
+ int err, slot;
+
+ slot = of_get_pci_domain_nr(dev->of_node);
+ if (slot < 0) {
+ for_each_available_child_of_node(node, child) {
+ err = of_pci_get_devfn(child);
+ if (err < 0) {
+ dev_err(dev, "failed to get devfn: %d\n", err);
+ goto error_put_node;
+ }
- for_each_available_child_of_node(node, child) {
- int slot;
+ slot = PCI_SLOT(err);
- err = of_pci_get_devfn(child);
- if (err < 0) {
- dev_err(dev, "failed to parse devfn: %d\n", err);
- goto error_put_node;
+ err = mtk_pcie_parse_port(pcie, child, slot);
+ if (err)
+ goto error_put_node;
}
-
- slot = PCI_SLOT(err);
-
- err = mtk_pcie_parse_port(pcie, child, slot);
+ } else {
+ err = mtk_pcie_parse_port(pcie, node, slot);
if (err)
- goto error_put_node;
+ return err;
}
err = mtk_pcie_subsys_powerup(pcie);
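
The new `else if (pcie->cfg)` branch above goes through a syscon regmap because on those SoCs the PCIE_SYS_CFG_V2 register is shared with other users and cannot be written directly. A simplified sketch of what regmap_update_bits() does; the real implementation also serializes the sequence under the map's lock:

#include <linux/regmap.h>

static int regmap_update_bits_sketch(struct regmap *map, unsigned int reg,
				     unsigned int mask, unsigned int val)
{
	unsigned int old, new;
	int ret;

	ret = regmap_read(map, reg, &old);	/* read current value */
	if (ret)
		return ret;
	new = (old & ~mask) | (val & mask);	/* modify only masked bits */
	return regmap_write(map, reg, new);	/* write back */
}
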
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index fdab8202ae5d..329f930d17aa 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -412,16 +412,14 @@ static void mc_handle_msi(struct irq_desc *desc)
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_MSI_MASK) {
status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
for_each_set_bit(bit, &status, msi->num_vectors) {
- virq = irq_find_mapping(msi->dev_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
+ ret = generic_handle_domain_irq(msi->dev_domain, bit);
+ if (ret)
dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
bit);
}
@@ -570,17 +568,15 @@ static void mc_handle_intx(struct irq_desc *desc)
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_INTX_MASK) {
status &= PM_MSI_INT_INTX_MASK;
status >>= PM_MSI_INT_INTX_SHIFT;
for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- virq = irq_find_mapping(port->intx_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
+ ret = generic_handle_domain_irq(port->intx_domain, bit);
+ if (ret)
dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
bit);
}
@@ -745,7 +741,7 @@ static void mc_handle_event(struct irq_desc *desc)
events = get_events(port);
for_each_set_bit(bit, &events, NUM_EVENTS)
- generic_handle_irq(irq_find_mapping(port->event_domain, bit));
+ generic_handle_domain_irq(port->event_domain, bit);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index b4a288e24aaf..aa1cf24a5a72 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -159,7 +159,7 @@ static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
return 0;
}
-static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -195,7 +195,7 @@ static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
@@ -246,7 +246,7 @@ static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
return 0;
}
-static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -259,7 +259,8 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
+ u8 interrupts)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -272,7 +273,7 @@ static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
return 0;
}
-static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -285,7 +286,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
-static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr, size_t size)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -322,7 +323,7 @@ static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -403,7 +404,7 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
return 0;
}
-static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -451,7 +452,7 @@ static const struct pci_epc_features rcar_pcie_epc_features = {
};
static const struct pci_epc_features*
-rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rcar_pcie_epc_features;
}
@@ -492,9 +493,9 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
pcie->dev = dev;
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
goto err_pm_disable;
}
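
The rcar-ep hunks above (and the rockchip-ep ones further below) add a virtual-function number to every pci_epc_ops callback, part of the 5.15 SR-IOV endpoint work. A hedged sketch of the updated callback shape; the assumption that vfunc_no == 0 denotes the physical function follows how these two drivers simply ignore the new argument:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			   struct pci_epf_bar *epf_bar)
{
	if (vfunc_no > 0)
		return -EOPNOTSUPP;	/* this core handles only the PF */
	/* ... program BAR registers for func_no as before ... */
	return 0;
}
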
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 765cf2b45e24..8f3131844e77 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -13,12 +13,14 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -41,6 +43,21 @@ struct rcar_msi {
int irq2;
};
+#ifdef CONFIG_ARM
+/*
+ * Here we keep a static copy of the remapped PCIe controller address.
+ * This is only used on aarch32 systems, all of which have a single
+ * PCIe controller, to give the L1 link state fixup function, called
+ * from the ARM fault handler, quick access to the controller.
+ */
+static void __iomem *pcie_base;
+/*
+ * Static copy of bus clock pointer, so we can check whether the clock
+ * is enabled or not.
+ */
+static struct clk *pcie_bus_clk;
+#endif
+
/* Structure representing the PCIe interface */
struct rcar_pcie_host {
struct rcar_pcie pcie;
@@ -486,12 +503,10 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
while (reg) {
unsigned int index = find_first_bit(&reg, 32);
- unsigned int msi_irq;
+ int ret;
- msi_irq = irq_find_mapping(msi->domain->parent, index);
- if (msi_irq) {
- generic_handle_irq(msi_irq);
- } else {
+ ret = generic_handle_domain_irq(msi->domain->parent, index);
+ if (ret) {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
@@ -776,6 +791,12 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
}
host->msi.irq2 = i;
+#ifdef CONFIG_ARM
+ /* Cache static copy for L1 link state fixup hook on aarch32 */
+ pcie_base = pcie->base;
+ pcie_bus_clk = host->bus_clk;
+#endif
+
return 0;
err_irq2:
@@ -1031,4 +1052,67 @@ static struct platform_driver rcar_pcie_driver = {
},
.probe = rcar_pcie_probe,
};
+
+#ifdef CONFIG_ARM
+static DEFINE_SPINLOCK(pmsr_lock);
+static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ unsigned long flags;
+ u32 pmsr, val;
+ int ret = 0;
+
+ spin_lock_irqsave(&pmsr_lock, flags);
+
+ if (!pcie_base || !__clk_is_enabled(pcie_bus_clk)) {
+ ret = 1;
+ goto unlock_exit;
+ }
+
+ pmsr = readl(pcie_base + PMSR);
+
+ /*
+ * Test whether the PCIe controller received a PM_ENTER_L1 DLLP and
+ * is not yet in the L1 link state. If so, apply the fix, which puts
+ * the controller into the L1 link state, from which it can return
+ * to L0s/L0 on its own.
+ */
+ if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
+ writel(L1IATN, pcie_base + PMCTLR);
+ ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
+ val & L1FAEG, 10, 1000);
+ WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ }
+
+unlock_exit:
+ spin_unlock_irqrestore(&pmsr_lock, flags);
+ return ret;
+}
+
+static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
+ { .compatible = "renesas,pcie-r8a7779" },
+ { .compatible = "renesas,pcie-r8a7790" },
+ { .compatible = "renesas,pcie-r8a7791" },
+ { .compatible = "renesas,pcie-rcar-gen2" },
+ {},
+};
+
+static int __init rcar_pcie_init(void)
+{
+ if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
+#ifdef CONFIG_ARM_LPAE
+ hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
+ "asynchronous external abort");
+#else
+ hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+#endif
+ }
+
+ return platform_driver_register(&rcar_pcie_driver);
+}
+device_initcall(rcar_pcie_init);
+#else
builtin_platform_driver(rcar_pcie_driver);
+#endif
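
The abort handler above waits for L1FAEG with readl_poll_timeout_atomic(). A simplified sketch of the helper's semantics, specialized to this caller (the real macro in include/linux/iopoll.h takes an arbitrary condition expression, inserts cpu_relax(), and uses precise ktime-based accounting):

#include <linux/delay.h>
#include <linux/io.h>

static int readl_poll_l1faeg_sketch(void __iomem *addr, u32 *val,
				    unsigned long delay_us,
				    unsigned long timeout_us)
{
	unsigned long waited = 0;

	for (;;) {
		*val = readl(addr);
		if (*val & L1FAEG)	/* the "cond" argument */
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT; /* *val holds the last read */
		udelay(delay_us);
		waited += delay_us;
	}
}
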
diff --git a/drivers/pci/controller/pcie-rcar.h b/drivers/pci/controller/pcie-rcar.h
index d4c698b5f821..9bb125db85c6 100644
--- a/drivers/pci/controller/pcie-rcar.h
+++ b/drivers/pci/controller/pcie-rcar.h
@@ -85,6 +85,13 @@
#define LTSMDIS BIT(31)
#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
#define PMSR 0x01105c
+#define L1FAEG BIT(31)
+#define PMEL1RX BIT(23)
+#define PMSTATE GENMASK(18, 16)
+#define PMSTATE_L1 (3 << 16)
+#define PMCTLR 0x011060
+#define L1IATN BIT(31)
+
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
#define SPCNGRSN BIT(31)
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 7631dc3961c1..5fb9ce6e536e 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -122,7 +122,7 @@ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
}
-static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -159,7 +159,7 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -227,7 +227,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -256,7 +256,7 @@ static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}
-static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr,
size_t size)
{
@@ -284,7 +284,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -308,7 +308,7 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
u8 multi_msg_cap)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -329,7 +329,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
@@ -471,7 +471,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -510,7 +510,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
};
static const struct pci_epc_features*
-rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rockchip_pcie_epc_features;
}
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 78d04ac29cd5..c52316d0bfd2 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -517,7 +517,7 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
struct device *dev = rockchip->dev;
u32 reg;
u32 hwirq;
- u32 virq;
+ int ret;
chained_irq_enter(chip, desc);
@@ -528,10 +528,8 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
hwirq = ffs(reg) - 1;
reg &= ~BIT(hwirq);
- virq = irq_find_mapping(rockchip->irq_domain, hwirq);
- if (virq)
- generic_handle_irq(virq);
- else
+ ret = generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+ if (ret)
dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
}
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 67937facd90c..95426df03200 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -222,7 +222,7 @@ static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));
for_each_set_bit(i, &val, PCI_NUM_INTX)
- generic_handle_irq(irq_find_mapping(port->intx_domain, i));
+ generic_handle_domain_irq(port->intx_domain, i);
chained_irq_exit(chip, desc);
}
@@ -282,7 +282,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
for_each_set_bit(i, &val, 32)
- generic_handle_irq(irq_find_mapping(port->cpm_domain, i));
+ generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
/*
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 8689311c5ef6..a72b4f9a2b00 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -6,6 +6,7 @@
* (C) Copyright 2014 - 2015, Xilinx, Inc.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -169,6 +170,7 @@ struct nwl_pcie {
u8 last_busno;
struct nwl_msi msi;
struct irq_domain *legacy_irq_domain;
+ struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -318,18 +320,14 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
struct nwl_pcie *pcie;
unsigned long status;
u32 bit;
- u32 virq;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL) != 0) {
- for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- virq = irq_find_mapping(pcie->legacy_irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &status, PCI_NUM_INTX)
+ generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
}
chained_irq_exit(chip, desc);
@@ -340,16 +338,13 @@ static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
struct nwl_msi *msi;
unsigned long status;
u32 bit;
- u32 virq;
msi = &pcie->msi;
while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
for_each_set_bit(bit, &status, 32) {
nwl_bridge_writel(pcie, 1 << bit, status_reg);
- virq = irq_find_mapping(msi->dev_domain, bit);
- if (virq)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi->dev_domain, bit);
}
}
}
@@ -823,6 +818,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ err = clk_prepare_enable(pcie->clk);
+ if (err) {
+ dev_err(dev, "can't enable PCIe ref clock\n");
+ return err;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 14001febf59a..aa9bdcebc838 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -385,7 +385,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
- unsigned int irq;
+ struct irq_domain *domain;
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
@@ -399,19 +399,18 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
- irq = irq_find_mapping(port->msi_domain->parent, val);
+ domain = port->msi_domain->parent;
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
- irq = irq_find_mapping(port->leg_domain, val);
+ domain = port->leg_domain;
}
/* Clear interrupt FIFO register 1 */
pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
XILINX_PCIE_REG_RPIFR1);
- if (irq)
- generic_handle_irq(irq);
+ generic_handle_domain_irq(domain, val);
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index e3fcdfec58b3..a5987e52700e 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
@@ -447,6 +448,56 @@ static struct pci_ops vmd_ops = {
.write = vmd_pci_write,
};
+#ifdef CONFIG_ACPI
+static struct acpi_device *vmd_acpi_find_companion(struct pci_dev *pci_dev)
+{
+ struct pci_host_bridge *bridge;
+ u32 busnr, addr;
+
+ if (pci_dev->bus->ops != &vmd_ops)
+ return NULL;
+
+ bridge = pci_find_host_bridge(pci_dev->bus);
+ busnr = pci_dev->bus->number - bridge->bus->number;
+ /*
+ * The address computation below is only applicable to relative bus
+ * numbers below 32.
+ */
+ if (busnr > 31)
+ return NULL;
+
+ addr = (busnr << 24) | ((u32)pci_dev->devfn << 16) | 0x8000FFFFU;
+
+ dev_dbg(&pci_dev->dev, "Looking for ACPI companion (address 0x%x)\n",
+ addr);
+
+ return acpi_find_child_device(ACPI_COMPANION(bridge->dev.parent), addr,
+ false);
+}
+
+static bool hook_installed;
+
+static void vmd_acpi_begin(void)
+{
+ if (pci_acpi_set_companion_lookup_hook(vmd_acpi_find_companion))
+ return;
+
+ hook_installed = true;
+}
+
+static void vmd_acpi_end(void)
+{
+ if (!hook_installed)
+ return;
+
+ pci_acpi_clear_companion_lookup_hook();
+ hook_installed = false;
+}
+#else
+static inline void vmd_acpi_begin(void) { }
+static inline void vmd_acpi_end(void) { }
+#endif /* CONFIG_ACPI */
+
static void vmd_attach_resources(struct vmd_dev *vmd)
{
vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
@@ -747,6 +798,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
if (vmd->irq_domain)
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+ vmd_acpi_begin();
+
pci_scan_child_bus(vmd->bus);
pci_assign_unassigned_bus_resources(vmd->bus);
@@ -760,6 +813,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_bus_add_devices(vmd->bus);
+ vmd_acpi_end();
+
WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
"domain"), "Can't create symlink to domain\n");
return 0;
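
The vmd_acpi_begin()/vmd_acpi_end() pair is deliberately wrapped around enumeration only: the companion lookup hook is global, so it is installed just before the child bus is scanned and cleared as soon as the devices are added. Condensed from the hunks above:

	vmd_acpi_begin();		/* install vmd_acpi_find_companion() */
	pci_scan_child_bus(vmd->bus);	/* companions resolved via the hook */
	pci_assign_unassigned_bus_resources(vmd->bus);
	pci_bus_add_devices(vmd->bus);
	vmd_acpi_end();			/* drop the hook again */

Only one hook can be installed at a time; if another owner already holds it, vmd_acpi_begin() leaves hook_installed false and vmd_acpi_end() becomes a no-op.
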
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index bce274d02dcf..8b4756159f15 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -87,6 +87,7 @@ struct epf_ntb {
struct epf_ntb_epc {
u8 func_no;
+ u8 vfunc_no;
bool linkup;
bool is_msix;
int msix_bar;
@@ -143,14 +144,15 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
struct epf_ntb_epc *ntb_epc;
struct epf_ntb_ctrl *ctrl;
struct pci_epc *epc;
+ u8 func_no, vfunc_no;
bool is_msix;
- u8 func_no;
int ret;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
is_msix = ntb_epc->is_msix;
ctrl = ntb_epc->reg;
if (link_up)
@@ -158,7 +160,7 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
else
ctrl->link_status &= ~LINK_STATUS_UP;
irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
- ret = pci_epc_raise_irq(epc, func_no, irq_type, 1);
+ ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
if (ret) {
dev_err(&epc->dev,
"%s intf: Failed to raise Link Up IRQ\n",
@@ -238,10 +240,10 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb,
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
u64 addr, size;
int ret = 0;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -267,8 +269,9 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb,
}
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- ret = pci_epc_map_addr(epc, func_no, phys_addr, addr, size);
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size);
if (ret)
dev_err(&epc->dev,
"%s intf: Failed to map memory window %d address\n",
@@ -296,8 +299,8 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -311,8 +314,9 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
phys_addr += ctrl->mw1_offset;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- pci_epc_unmap_addr(epc, func_no, phys_addr);
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
@@ -385,8 +389,8 @@ static int epf_ntb_configure_msi(struct epf_ntb *ntb,
struct epf_ntb_ctrl *peer_ctrl;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
int ret, i;
ntb_epc = ntb->epc[type];
@@ -400,8 +404,9 @@ static int epf_ntb_configure_msi(struct epf_ntb *ntb,
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- ret = pci_epc_map_msi_irq(epc, func_no, phys_addr, db_count,
+ ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count,
db_entry_size, &db_data, &db_offset);
if (ret) {
dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n",
@@ -491,10 +496,10 @@ static int epf_ntb_configure_msix(struct epf_ntb *ntb,
u32 db_entry_size, msg_data;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
size_t align;
u64 msg_addr;
- u8 func_no;
int ret, i;
ntb_epc = ntb->epc[type];
@@ -512,12 +517,13 @@ static int epf_ntb_configure_msix(struct epf_ntb *ntb,
align = epc_features->align;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
db_entry_size = peer_ctrl->db_entry_size;
for (i = 0; i < db_count; i++) {
msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align);
msg_data = msix_tbl[i].msg_data;
- ret = pci_epc_map_addr(epc, func_no, phys_addr, msg_addr,
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, msg_addr,
db_entry_size);
if (ret) {
dev_err(&epc->dev,
@@ -586,8 +592,8 @@ epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
struct pci_epf_bar *peer_epf_bar;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -597,8 +603,9 @@ epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- pci_epc_unmap_addr(epc, func_no, phys_addr);
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
@@ -728,14 +735,15 @@ static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
@@ -775,9 +783,9 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
struct pci_epf_bar *peer_epf_bar, *epf_bar;
enum pci_barno peer_barno, barno;
u32 peer_spad_offset;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
- u8 func_no;
int ret;
dev = &ntb->epf->dev;
@@ -790,6 +798,7 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
peer_spad_offset = peer_ntb_epc->reg->spad_offset;
@@ -798,7 +807,7 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
epf_bar->barno = barno;
epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
pci_epc_interface_string(type));
@@ -842,14 +851,15 @@ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
@@ -886,10 +896,10 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct epf_ntb *ntb;
struct pci_epc *epc;
struct device *dev;
- u8 func_no;
int ret;
ntb = ntb_epc->epf_ntb;
@@ -897,10 +907,11 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n",
pci_epc_interface_string(ntb_epc->type));
@@ -1214,17 +1225,18 @@ static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
struct pci_epf_bar *epf_bar;
enum epf_ntb_bar bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
}
@@ -1263,10 +1275,10 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
const struct pci_epc_features *epc_features;
bool msix_capable, msi_capable;
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
u32 db_count;
- u8 func_no;
int ret;
ntb_epc = ntb->epc[type];
@@ -1282,6 +1294,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
}
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
db_count = ntb->db_count;
if (db_count > MAX_DB_COUNT) {
@@ -1293,7 +1306,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
epc = ntb_epc->epc;
if (msi_capable) {
- ret = pci_epc_set_msi(epc, func_no, db_count);
+ ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count);
if (ret) {
dev_err(dev, "%s intf: MSI configuration failed\n",
pci_epc_interface_string(type));
@@ -1302,7 +1315,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
}
if (msix_capable) {
- ret = pci_epc_set_msix(epc, func_no, db_count,
+ ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count,
ntb_epc->msix_bar,
ntb_epc->msix_table_offset);
if (ret) {
@@ -1423,11 +1436,11 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
u32 num_mws, db_count;
enum epf_ntb_bar bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
size_t align;
int ret, i;
- u8 func_no;
u64 size;
ntb_epc = ntb->epc[type];
@@ -1437,6 +1450,7 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
epc_features = ntb_epc->epc_features;
align = epc_features->align;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
num_mws = ntb->num_mws;
db_count = ntb->db_count;
@@ -1464,7 +1478,7 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: DoorBell BAR set failed\n",
pci_epc_interface_string(type));
@@ -1536,9 +1550,9 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
const struct pci_epc_features *epc_features;
struct pci_epf_bar *epf_bar;
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epf *epf;
struct device *dev;
- u8 func_no;
dev = &ntb->epf->dev;
@@ -1547,6 +1561,7 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
return -ENOMEM;
epf = ntb->epf;
+ vfunc_no = epf->vfunc_no;
if (type == PRIMARY_INTERFACE) {
func_no = epf->func_no;
epf_bar = epf->bar;
@@ -1558,11 +1573,12 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
ntb_epc->linkup = false;
ntb_epc->epc = epc;
ntb_epc->func_no = func_no;
+ ntb_epc->vfunc_no = vfunc_no;
ntb_epc->type = type;
ntb_epc->epf_bar = epf_bar;
ntb_epc->epf_ntb = ntb;
- epc_features = pci_epc_get_features(epc, func_no);
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
if (!epc_features)
return -EINVAL;
ntb_epc->epc_features = epc_features;
@@ -1702,10 +1718,10 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct pci_epf *epf;
struct device *dev;
- u8 func_no;
int ret;
ntb_epc = ntb->epc[type];
@@ -1713,6 +1729,7 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
dev = &epf->dev;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
if (ret) {
@@ -1742,11 +1759,13 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
goto err_db_mw_bar_init;
}
- ret = pci_epc_write_header(epc, func_no, epf->header);
- if (ret) {
- dev_err(dev, "%s intf: Configuration header write failed\n",
- pci_epc_interface_string(type));
- goto err_write_header;
+ if (vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "%s intf: Configuration header write failed\n",
+ pci_epc_interface_string(type));
+ goto err_write_header;
+ }
}
INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
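
The changes in this file are mechanical: every pci_epc_*() call gains the vfunc_no that is now cached in struct epf_ntb_epc next to func_no, so the same NTB machinery can sit behind a virtual function. For example, ringing the peer's doorbell follows this shape (a sketch distilled from epf_ntb_link_up() above):

	ret = pci_epc_raise_irq(epc, ntb_epc->func_no, ntb_epc->vfunc_no,
				PCI_EPC_IRQ_MSI, 1);
	if (ret)
		dev_err(&epc->dev, "failed to raise doorbell IRQ\n");

A vfunc_no of 0 keeps the old behaviour and addresses the physical function itself.
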
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index d2708ca4bece..90d84d3bc868 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -247,8 +247,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -263,8 +263,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_src_map_addr;
}
- ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -291,13 +291,13 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
err_dst_addr:
pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
err_src_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
err_src_addr:
pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
@@ -331,8 +331,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -386,7 +386,7 @@ err_dma_map:
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
@@ -419,8 +419,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -479,7 +479,7 @@ err_dma_map:
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
@@ -501,13 +501,16 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
switch (irq_type) {
case IRQ_TYPE_LEGACY:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
break;
case IRQ_TYPE_MSI:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, irq);
break;
case IRQ_TYPE_MSIX:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, irq);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -542,7 +545,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
goto reset_handler;
}
@@ -580,22 +584,22 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
}
if (command & COMMAND_RAISE_MSI_IRQ) {
- count = pci_epc_get_msi(epc, epf->func_no);
+ count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, reg->irq_number);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSIX_IRQ) {
- count = pci_epc_get_msix(epc, epf->func_no);
+ count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, reg->irq_number);
goto reset_handler;
}
@@ -618,7 +622,8 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf_bar);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
}
@@ -650,7 +655,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
- ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
@@ -674,16 +680,18 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
bool msi_capable = true;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (epc_features) {
msix_capable = epc_features->msix_capable;
msi_capable = epc_features->msi_capable;
}
- ret = pci_epc_write_header(epc, epf->func_no, header);
- if (ret) {
- dev_err(dev, "Configuration header write failed\n");
- return ret;
+ if (epf->vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ return ret;
+ }
}
ret = pci_epf_test_set_bar(epf);
@@ -691,7 +699,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
return ret;
if (msi_capable) {
- ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ epf->msi_interrupts);
if (ret) {
dev_err(dev, "MSI configuration failed\n");
return ret;
@@ -699,7 +708,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
if (msix_capable) {
- ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
+ epf->msix_interrupts,
epf_test->test_reg_bar,
epf_test->msix_table_offset);
if (ret) {
@@ -832,7 +842,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (WARN_ON_ONCE(!epc))
return -EINVAL;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (!epc_features) {
dev_err(&epf->dev, "epc_features not implemented\n");
return -EOPNOTSUPP;
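
As in pci-epf-ntb.c, the header write is now guarded by vfunc_no <= 1. The rationale (see the matching check in pci_epc_write_header() later in this merge) is that virtual functions share the physical function's configuration header and only VF #1 carries its own device ID, so anything above 1 must not attempt the write. Distilled:

	/* PF (vfunc_no == 0) and VF #1 own a header; higher VFs inherit it */
	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
					   header);
		if (ret)
			return ret;
	}
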
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index f3a8b833b479..999911801877 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -475,6 +475,28 @@ static struct configfs_attribute *pci_epf_attrs[] = {
NULL,
};
+static int pci_epf_vepf_link(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ return pci_epf_add_vepf(epf_pf, epf_vf);
+}
+
+static void pci_epf_vepf_unlink(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ pci_epf_remove_vepf(epf_pf, epf_vf);
+}
+
static void pci_epf_release(struct config_item *item)
{
struct pci_epf_group *epf_group = to_pci_epf_group(item);
@@ -487,6 +509,8 @@ static void pci_epf_release(struct config_item *item)
}
static struct configfs_item_operations pci_epf_ops = {
+ .allow_link = pci_epf_vepf_link,
+ .drop_link = pci_epf_vepf_unlink,
.release = pci_epf_release,
};
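
Wiring allow_link/drop_link into pci_epf_ops means the PF/VF association is driven entirely from user space: symlinking one EPF directory into another under the endpoint configfs root calls pci_epf_vepf_link(), and removing the link calls pci_epf_vepf_unlink(). A generic sketch of the mechanism (names hypothetical):

static struct configfs_item_operations example_ops = {
	.allow_link	= example_link,	  /* invoked on symlink creation */
	.drop_link	= example_unlink, /* invoked on symlink removal */
	.release	= example_release,
};

So something like 'ln -s functions/pci_epf_test/vfunc1 functions/pci_epf_test/func1' (paths illustrative) is all it takes to hand a virtual function to a physical one before binding.
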
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index adec9bee72cf..ecbb0fb3b653 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -137,24 +137,29 @@ EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
* @epc: the features supported by *this* EPC device will be returned
* @func_no: the features supported by the EPC device specific to the
* endpoint function with func_no will be returned
+ * @vfunc_no: the features supported by the EPC device specific to the
+ * virtual endpoint function with vfunc_no will be returned
*
* Invoke to get the features provided by the EPC which may be
* specific to an endpoint function. Returns pci_epc_features on success
* and NULL for any failures.
*/
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no)
+ u8 func_no, u8 vfunc_no)
{
const struct pci_epc_features *epc_features;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return NULL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return NULL;
+
if (!epc->ops->get_features)
return NULL;
mutex_lock(&epc->lock);
- epc_features = epc->ops->get_features(epc, func_no);
+ epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
return epc_features;
@@ -205,13 +210,14 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
/**
* pci_epc_raise_irq() - interrupt the host system
* @epc: the EPC device which has to interrupt the host
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @type: specify the type of interrupt; legacy, MSI or MSI-X
* @interrupt_num: the MSI or MSI-X interrupt number
*
* Invoke to raise a legacy, MSI or MSI-X interrupt
*/
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
@@ -219,11 +225,14 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->raise_irq)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
+ ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
mutex_unlock(&epc->lock);
return ret;
@@ -235,6 +244,7 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
* MSI data
* @epc: the EPC device which has the MSI capability
* @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: the physical address of the outbound region
* @interrupt_num: the MSI interrupt number
* @entry_size: Size of Outbound address region for each interrupt
@@ -250,21 +260,25 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
* physical address (in outbound region) of the other interface to ring
* doorbell.
*/
-int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr,
- u8 interrupt_num, u32 entry_size, u32 *msi_data,
- u32 *msi_addr_offset)
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
+ u32 *msi_data, u32 *msi_addr_offset)
{
int ret;
if (IS_ERR_OR_NULL(epc))
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->map_msi_irq)
return -EINVAL;
mutex_lock(&epc->lock);
- ret = epc->ops->map_msi_irq(epc, func_no, phys_addr, interrupt_num,
- entry_size, msi_data, msi_addr_offset);
+ ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
+ interrupt_num, entry_size, msi_data,
+ msi_addr_offset);
mutex_unlock(&epc->lock);
return ret;
@@ -274,22 +288,26 @@ EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
/**
* pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
* @epc: the EPC device to which MSI interrupts were requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI interrupts allocated by the RC
*/
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msi)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msi(epc, func_no);
+ interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -304,12 +322,13 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
/**
* pci_epc_set_msi() - set the number of MSI interrupt numbers required
* @epc: the EPC device on which MSI has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
int ret;
u8 encode_int;
@@ -318,13 +337,16 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
interrupts > 32)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msi)
return 0;
encode_int = order_base_2(interrupts);
mutex_lock(&epc->lock);
- ret = epc->ops->set_msi(epc, func_no, encode_int);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
mutex_unlock(&epc->lock);
return ret;
@@ -334,22 +356,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
/**
* pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
* @epc: the EPC device to which MSI-X interrupts were requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI-X interrupts allocated by the RC
*/
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msix)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msix(epc, func_no);
+ interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -362,15 +388,16 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
/**
* pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
* @epc: the EPC device on which MSI-X has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI-X interrupts required by the EPF
* @bir: BAR where the MSI-X table resides
* @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
int ret;
@@ -378,11 +405,15 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
interrupts < 1 || interrupts > 2048)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msix)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
+ offset);
mutex_unlock(&epc->lock);
return ret;
@@ -392,22 +423,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
/**
* pci_epc_unmap_addr() - unmap CPU address from PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
*
* Invoke to unmap the CPU address from PCI address.
*/
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->unmap_addr)
return;
mutex_lock(&epc->lock);
- epc->ops->unmap_addr(epc, func_no, phys_addr);
+ epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -415,14 +450,15 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
/**
* pci_epc_map_addr() - map CPU address to PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
* @pci_addr: PCI address to which the physical address should be mapped
* @size: the size of the allocation
*
* Invoke to map a CPU address to a PCI address.
*/
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
int ret;
@@ -430,11 +466,15 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->map_addr)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
+ ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
+ size);
mutex_unlock(&epc->lock);
return ret;
@@ -444,12 +484,13 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
/**
* pci_epc_clear_bar() - reset the BAR
* @epc: the EPC device for which the BAR has to be cleared
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to reset the BAR of the endpoint device.
*/
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
@@ -457,11 +498,14 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->clear_bar)
return;
mutex_lock(&epc->lock);
- epc->ops->clear_bar(epc, func_no, epf_bar);
+ epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -469,12 +513,13 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
/**
* pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
* @epc: the EPC device on which BAR has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to configure the BAR of the endpoint device.
*/
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -489,11 +534,14 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_bar)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_bar(epc, func_no, epf_bar);
+ ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
return ret;
@@ -503,7 +551,8 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @header: standard configuration header fields
*
* Invoke to write the configuration header to the endpoint controller. Every
@@ -511,7 +560,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
* configuration header would be written. The callback function should write
* the header fields to this dedicated location.
*/
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *header)
{
int ret;
@@ -519,11 +568,18 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ /* Only Virtual Function #1 has a device ID */
+ if (vfunc_no > 1)
+ return -EINVAL;
+
if (!epc->ops->write_header)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->write_header(epc, func_no, header);
+ ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
mutex_unlock(&epc->lock);
return ret;
@@ -548,7 +604,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
u32 func_no;
int ret = 0;
- if (IS_ERR_OR_NULL(epc))
+ if (IS_ERR_OR_NULL(epc) || epf->is_vf)
return -EINVAL;
if (type == PRIMARY_INTERFACE && epf->epc)
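
Every wrapper in this file now repeats the same guard before calling into epc->ops, so an out-of-range virtual function fails fast at the API boundary no matter which entry point is used:

	/*
	 * vfunc_no == 0 addresses the physical function itself; anything
	 * higher must fit the per-PF limit the controller advertises in
	 * epc->max_vfs[], which may be absent entirely.
	 */
	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

(Void wrappers return early instead of -EINVAL; the getters return 0 or NULL.)
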
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 502eb79cd551..8aea16380870 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -62,13 +62,20 @@ EXPORT_SYMBOL_GPL(pci_epf_type_add_cfs);
*/
void pci_epf_unbind(struct pci_epf *epf)
{
+ struct pci_epf *epf_vf;
+
if (!epf->driver) {
dev_WARN(&epf->dev, "epf device not bound to driver\n");
return;
}
mutex_lock(&epf->lock);
- epf->driver->ops->unbind(epf);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ if (epf_vf->is_bound)
+ epf_vf->driver->ops->unbind(epf_vf);
+ }
+ if (epf->is_bound)
+ epf->driver->ops->unbind(epf);
mutex_unlock(&epf->lock);
module_put(epf->driver->owner);
}
@@ -83,10 +90,14 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
*/
int pci_epf_bind(struct pci_epf *epf)
{
+ struct device *dev = &epf->dev;
+ struct pci_epf *epf_vf;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
int ret;
if (!epf->driver) {
- dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ dev_WARN(dev, "epf device not bound to driver\n");
return -EINVAL;
}
@@ -94,14 +105,141 @@ int pci_epf_bind(struct pci_epf *epf)
return -EAGAIN;
mutex_lock(&epf->lock);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ vfunc_no = epf_vf->vfunc_no;
+
+ if (vfunc_no < 1) {
+ dev_err(dev, "Invalid virtual function number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ epc = epf->epc;
+ func_no = epf->func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epc = epf->sec_epc;
+ func_no = epf->sec_epc_func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epf_vf->func_no = epf->func_no;
+ epf_vf->sec_epc_func_no = epf->sec_epc_func_no;
+ epf_vf->epc = epf->epc;
+ epf_vf->sec_epc = epf->sec_epc;
+ ret = epf_vf->driver->ops->bind(epf_vf);
+ if (ret)
+ goto ret;
+ epf_vf->is_bound = true;
+ }
+
ret = epf->driver->ops->bind(epf);
+ if (ret)
+ goto ret;
+ epf->is_bound = true;
+
+ mutex_unlock(&epf->lock);
+ return 0;
+
+ret:
mutex_unlock(&epf->lock);
+ pci_epf_unbind(epf);
return ret;
}
EXPORT_SYMBOL_GPL(pci_epf_bind);
/**
+ * pci_epf_add_vepf() - associate virtual EP function to physical EP function
+ * @epf_pf: the physical EP function to which the virtual EP function should be
+ * associated
+ * @epf_vf: the virtual EP function to be added
+ *
+ * A physical endpoint function can be associated with multiple virtual
+ * endpoint functions. Invoke pci_epf_add_vepf() to add a virtual PCI endpoint
+ * function to a physical PCI endpoint function.
+ */
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ u32 vfunc_no;
+
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return -EINVAL;
+
+ if (epf_pf->epc || epf_vf->epc || epf_vf->epf_pf)
+ return -EBUSY;
+
+ if (epf_pf->sec_epc || epf_vf->sec_epc)
+ return -EBUSY;
+
+ mutex_lock(&epf_pf->lock);
+ vfunc_no = find_first_zero_bit(&epf_pf->vfunction_num_map,
+ BITS_PER_LONG);
+ if (vfunc_no >= BITS_PER_LONG) {
+ mutex_unlock(&epf_pf->lock);
+ return -EINVAL;
+ }
+
+ set_bit(vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->vfunc_no = vfunc_no;
+
+ epf_vf->epf_pf = epf_pf;
+ epf_vf->is_vf = true;
+
+ list_add_tail(&epf_vf->list, &epf_pf->pci_vepf);
+ mutex_unlock(&epf_pf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_add_vepf);
+
+/**
+ * pci_epf_remove_vepf() - remove virtual EP function from physical EP function
+ * @epf_pf: the physical EP function from which the virtual EP function should
+ * be removed
+ * @epf_vf: the virtual EP function to be removed
+ *
+ * Invoke to remove a virtual endpoint function from the physical endpoint
+ * function.
+ */
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return;
+
+ mutex_lock(&epf_pf->lock);
+ clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ list_del(&epf_vf->list);
+ mutex_unlock(&epf_pf->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epf_remove_vepf);
+
+/**
* pci_epf_free_space() - free the allocated PCI EPF register space
* @epf: the EPF device from whom to free the memory
* @addr: the virtual address of the PCI EPF register space
@@ -317,6 +455,10 @@ struct pci_epf *pci_epf_create(const char *name)
return ERR_PTR(-ENOMEM);
}
+ /* VFs are numbered starting with 1. So set BIT(0) by default */
+ epf->vfunction_num_map = 1;
+ INIT_LIST_HEAD(&epf->pci_vepf);
+
dev = &epf->dev;
device_initialize(dev);
dev->bus = &pci_epf_bus_type;
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index e01d53f5b32f..afa50b446567 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -23,6 +23,7 @@ struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus)
return to_pci_host_bridge(root_bus->bridge);
}
+EXPORT_SYMBOL_GPL(pci_find_host_bridge);
struct device *pci_get_host_bridge_device(struct pci_dev *dev)
{
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index a32070be5adf..cc6194aa24c1 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -40,9 +40,6 @@ ibmphp:
* The return value of pci_hp_register() is not checked.
-* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
- and once more in the error path of its caller ibmphp_access_ebda().
-
* The various slot data structures are difficult to follow and need to be
simplified. A lot of functions are too large and too complex, they need
to be broken up into smaller, manageable pieces. Negative examples are
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 11a2661dc062..7fb75401ad8a 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -714,8 +714,7 @@ static int __init ebda_rsrc_controller(void)
/* init hpc structure */
hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
if (!hpc_ptr) {
- rc = -ENOMEM;
- goto error_no_hpc;
+ return -ENOMEM;
}
hpc_ptr->ctlr_id = ctlr_id;
hpc_ptr->ctlr_relative_id = ctlr;
@@ -910,8 +909,6 @@ error:
kfree(tmp_slot);
error_no_slot:
free_ebda_hpc(hpc_ptr);
-error_no_hpc:
- iounmap(io_mem);
return rc;
}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index d4a930881054..69fd401691be 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -184,7 +184,7 @@ void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot);
-int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe);
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe);
int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status);
int pciehp_set_raw_indicator_status(struct hotplug_slot *h_slot, u8 status);
int pciehp_get_raw_indicator_status(struct hotplug_slot *h_slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9d06939736c0..3024d7e85e6a 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -870,7 +870,7 @@ void pcie_disable_interrupt(struct controller *ctrl)
* momentarily, if we see that they could interfere. Also, clear any spurious
* events after.
*/
-int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 04565162a449..f4c2e6e01be0 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -526,7 +526,7 @@ scan:
return 0;
}
-static int pnv_php_reset_slot(struct hotplug_slot *slot, int probe)
+static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
struct pci_dev *bridge = php_slot->pdev;
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index a143b02b2dcd..d84381ce82b5 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -310,7 +310,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
/* Check for ranges property */
err = of_pci_range_parser_init(&parser, dev_node);
if (err)
- goto failed;
+ return 0;
dev_dbg(dev, "Parsing ranges property...\n");
for_each_of_pci_range(&parser, &range) {
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 36bc23e21759..a1b1e2a01632 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -17,6 +17,7 @@
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
+#include <linux/rwsem.h>
#include "pci.h"
/*
@@ -934,58 +935,77 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
static struct acpi_device *acpi_pci_find_companion(struct device *dev);
+void pci_set_acpi_fwnode(struct pci_dev *dev)
+{
+ if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+ ACPI_COMPANION_SET(&dev->dev,
+ acpi_pci_find_companion(&dev->dev));
+}
+
+/**
+ * pci_dev_acpi_reset - do a function level reset using _RST method
+ * @dev: device to reset
+ * @probe: if true, return 0 if device supports _RST
+ */
+int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
+{
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
+
+ if (!handle || !acpi_has_method(handle, "_RST"))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
+ pci_warn(dev, "ACPI _RST failed\n");
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static bool acpi_pci_power_manageable(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+
+ if (!adev)
+ return false;
+ return acpi_device_power_manageable(adev);
+}
+
static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
- const struct fwnode_handle *fwnode;
+ const union acpi_object *obj;
struct acpi_device *adev;
- struct pci_dev *root;
- u8 val;
+ struct pci_dev *rpdev;
if (!dev->is_hotplug_bridge)
return false;
/* Assume D3 support if the bridge is power-manageable by ACPI. */
- adev = ACPI_COMPANION(&dev->dev);
- if (!adev && !pci_dev_is_added(dev)) {
- adev = acpi_pci_find_companion(&dev->dev);
- ACPI_COMPANION_SET(&dev->dev, adev);
- }
-
- if (adev && acpi_device_power_manageable(adev))
+ if (acpi_pci_power_manageable(dev))
return true;
/*
- * Look for a special _DSD property for the root port and if it
- * is set we know the hierarchy behind it supports D3 just fine.
+ * The ACPI firmware provides device-specific properties through the
+ * _DSD configuration object. Look for the 'HotPlugSupportInD3' property
+ * on the root port; if it is set, we know the hierarchy behind it
+ * supports D3 just fine.
*/
- root = pcie_find_root_port(dev);
- if (!root)
+ rpdev = pcie_find_root_port(dev);
+ if (!rpdev)
return false;
- adev = ACPI_COMPANION(&root->dev);
- if (root == dev) {
- /*
- * It is possible that the ACPI companion is not yet bound
- * for the root port so look it up manually here.
- */
- if (!adev && !pci_dev_is_added(root))
- adev = acpi_pci_find_companion(&root->dev);
- }
-
+ adev = ACPI_COMPANION(&rpdev->dev);
if (!adev)
return false;
- fwnode = acpi_fwnode_handle(adev);
- if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
+ if (acpi_dev_get_property(adev, "HotPlugSupportInD3",
+ ACPI_TYPE_INTEGER, &obj) < 0)
return false;
- return val == 1;
-}
-
-static bool acpi_pci_power_manageable(struct pci_dev *dev)
-{
- struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
- return adev ? acpi_device_power_manageable(adev) : false;
+ return obj->integer.value == 1;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
@@ -1159,6 +1179,69 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
}
/* ACPI bus type */
+
+
+static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
+static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);
+
+/**
+ * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
+ * @func: ACPI companion lookup callback pointer or NULL.
+ *
+ * Set a special ACPI companion lookup callback for PCI devices whose companion
+ * objects in the ACPI namespace have _ADR with non-standard bus-device-function
+ * encodings.
+ *
+ * Return 0 on success or a negative error code on failure (in which case no
+ * changes are made).
+ *
+ * The caller is responsible for the appropriate ordering of the invocations of
+ * this function with respect to the enumeration of the PCI devices needing the
+ * callback installed by it.
+ */
+int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
+{
+ int ret;
+
+ if (!func)
+ return -EINVAL;
+
+ down_write(&pci_acpi_companion_lookup_sem);
+
+ if (pci_acpi_find_companion_hook) {
+ ret = -EBUSY;
+ } else {
+ pci_acpi_find_companion_hook = func;
+ ret = 0;
+ }
+
+ up_write(&pci_acpi_companion_lookup_sem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);
+
+/**
+ * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
+ *
+ * Clear the special ACPI companion lookup callback previously set by
+ * pci_acpi_set_companion_lookup_hook(). Block until the last running instance
+ * of the callback returns before clearing it.
+ *
+ * The caller is responsible for the appropriate ordering of the invocations of
+ * this function with respect to the enumeration of the PCI devices needing the
+ * callback cleared by it.
+ */
+void pci_acpi_clear_companion_lookup_hook(void)
+{
+ down_write(&pci_acpi_companion_lookup_sem);
+
+ pci_acpi_find_companion_hook = NULL;
+
+ up_write(&pci_acpi_companion_lookup_sem);
+}
+EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);
+
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -1166,6 +1249,16 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
bool check_children;
u64 addr;
+ down_read(&pci_acpi_companion_lookup_sem);
+
+ adev = pci_acpi_find_companion_hook ?
+ pci_acpi_find_companion_hook(pci_dev) : NULL;
+
+ up_read(&pci_acpi_companion_lookup_sem);
+
+ if (adev)
+ return adev;
+
check_children = pci_is_bridge(pci_dev);
/* Please ref to ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
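
pci_dev_acpi_reset() above adopts the probe convention used throughout this reset-method rework: with probe == true it only reports whether the method applies (0 if the companion has _RST), with probe == false it actually evaluates it. Caller-side, that looks like:

	/* first pass: is the method available on this device? */
	if (pci_dev_acpi_reset(pdev, true) == 0) {
		/* second pass: perform the _RST evaluation for real */
		if (pci_dev_acpi_reset(pdev, false))
			pci_warn(pdev, "reset via _RST failed\n");
	}
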
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index b31883022a8e..49bbd37ee318 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -54,7 +54,7 @@ struct pci_bridge_emul_pcie_conf {
__le16 slotctl;
__le16 slotsta;
__le16 rootctl;
- __le16 rsvd;
+ __le16 rootcap;
__le32 rootsta;
__le32 devcap2;
__le16 devctl2;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index b70f61fbcd4b..7fb5cd17cc98 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1367,7 +1367,7 @@ static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- if (!pdev->reset_fn)
+ if (!pci_reset_supported(pdev))
return 0;
return a->mode;
@@ -1491,6 +1491,7 @@ const struct attribute_group *pci_dev_groups[] = {
&pci_dev_config_attr_group,
&pci_dev_rom_attr_group,
&pci_dev_reset_attr_group,
+ &pci_dev_reset_method_attr_group,
&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
&pci_dev_smbios_attr_group,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a5e6759c407b..ce2ab62b64cf 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
+#include <linux/bitfield.h>
#include "pci.h"
DEFINE_MUTEX(pci_slot_mutex);
@@ -72,6 +73,11 @@ static void pci_dev_d3_sleep(struct pci_dev *dev)
msleep(delay);
}
+bool pci_reset_supported(struct pci_dev *dev)
+{
+ return dev->reset_methods[0] != 0;
+}
+
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif
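
pci_reset_supported() replaces the old dev->reset_fn flag: reset_methods[] is an ordered, zero-terminated list of method indices filled in at probe time, so a non-zero first element means at least one reset method is viable. Typical caller-side use, sketched:

	if (pci_reset_supported(pdev)) {
		/* some method in pdev->reset_methods[] probed successfully */
		if (pci_reset_function(pdev))
			pci_warn(pdev, "function reset failed\n");
	}
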
@@ -206,32 +212,36 @@ int pci_status_get_and_clear_errors(struct pci_dev *pdev)
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
#ifdef CONFIG_HAS_IOMEM
-void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
+ bool write_combine)
{
struct resource *res = &pdev->resource[bar];
+ resource_size_t start = res->start;
+ resource_size_t size = resource_size(res);
/*
* Make sure the BAR is actually a memory resource, not an IO resource
*/
if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
- pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
+ pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
return NULL;
}
- return ioremap(res->start, resource_size(res));
+
+ if (write_combine)
+ return ioremap_wc(start, size);
+
+ return ioremap(start, size);
+}
+
+void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+{
+ return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
- /*
- * Make sure the BAR is actually a memory resource, not an IO resource
- */
- if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
- WARN_ON(1);
- return NULL;
- }
- return ioremap_wc(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar));
+ return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
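Both exported wrappers now funnel through the same sanity checks above; a usage sketch, assuming hypothetical BAR assignments:

	void __iomem *regs = pci_ioremap_bar(pdev, 0);     /* uncached MMIO */
	void __iomem *wbuf = pci_ioremap_wc_bar(pdev, 2);  /* write-combined */

	/* Either call returns NULL for I/O or unset BARs and on ioremap failure. */
	if (!regs || !wbuf)
		return -ENOMEM;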
@@ -265,7 +275,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
*endptr = strchrnul(path, ';');
- wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
+ wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
if (!wpath)
return -ENOMEM;
@@ -915,8 +925,8 @@ static void pci_std_enable_acs(struct pci_dev *dev)
/* Upstream Forwarding */
ctrl |= (cap & PCI_ACS_UF);
- /* Enable Translation Blocking for external devices */
- if (dev->external_facing || dev->untrusted)
+ /* Enable Translation Blocking for external devices and noats */
+ if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
ctrl |= (cap & PCI_ACS_TB);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
@@ -4629,31 +4639,11 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
/**
- * pcie_has_flr - check if a device supports function level resets
- * @dev: device to check
- *
- * Returns true if the device advertises support for PCIe function level
- * resets.
- */
-bool pcie_has_flr(struct pci_dev *dev)
-{
- u32 cap;
-
- if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
- return false;
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
- return cap & PCI_EXP_DEVCAP_FLR;
-}
-EXPORT_SYMBOL_GPL(pcie_has_flr);
-
-/**
* pcie_flr - initiate a PCIe function level reset
* @dev: device to reset
*
- * Initiate a function level reset on @dev. The caller should ensure the
- * device supports FLR before calling this function, e.g. by using the
- * pcie_has_flr() helper.
+ * Initiate a function level reset unconditionally on @dev, without
+ * checking any flags or the DEVCAP register.
*/
int pcie_flr(struct pci_dev *dev)
{
@@ -4676,7 +4666,29 @@ int pcie_flr(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pcie_flr);
-static int pci_af_flr(struct pci_dev *dev, int probe)
+/**
+ * pcie_reset_flr - initiate a PCIe function level reset
+ * @dev: device to reset
+ * @probe: if true, return 0 if the device can be reset this way
+ *
+ * Initiate a function level reset on @dev.
+ */
+int pcie_reset_flr(struct pci_dev *dev, bool probe)
+{
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
+ return -ENOTTY;
+
+ if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ return pcie_flr(dev);
+}
+EXPORT_SYMBOL_GPL(pcie_reset_flr);
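Callers that previously guarded pcie_flr() with pcie_has_flr() now probe and reset through this single entry point, as the quirk conversions later in this diff show; the pattern in sketch form:

	/* Probe first; perform the reset only if FLR is supported. */
	if (pcie_reset_flr(dev, PCI_RESET_PROBE) == 0)
		rc = pcie_reset_flr(dev, PCI_RESET_DO_RESET);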
+
+static int pci_af_flr(struct pci_dev *dev, bool probe)
{
int pos;
u8 cap;
@@ -4723,7 +4735,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
/**
* pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
* @dev: Device to reset.
- * @probe: If set, only check if the device can be reset this way.
+ * @probe: if true, return 0 if the device can be reset this way.
*
* If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
* unset, it will be reinitialized internally when going from PCI_D3hot to
@@ -4735,7 +4747,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
* by default (i.e. unless the @dev's d3hot_delay field has a different value).
* Moreover, only devices in D0 can be reset by this function.
*/
-static int pci_pm_reset(struct pci_dev *dev, int probe)
+static int pci_pm_reset(struct pci_dev *dev, bool probe)
{
u16 csr;
@@ -4995,7 +5007,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
-static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
{
struct pci_dev *pdev;
@@ -5013,7 +5025,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
return pci_bridge_secondary_bus_reset(dev->bus->self);
}
-static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
+static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
{
int rc = -ENOTTY;
@@ -5028,7 +5040,7 @@ static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
return rc;
}
-static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
{
if (dev->multifunction || dev->subordinate || !dev->slot ||
dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
@@ -5037,7 +5049,7 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
-static int pci_reset_bus_function(struct pci_dev *dev, int probe)
+static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
{
int rc;
@@ -5121,6 +5133,139 @@ static void pci_dev_restore(struct pci_dev *dev)
err_handler->reset_done(dev);
}
+/* dev->reset_methods[] is a 0-terminated list of indices into this array */
+static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
+ { },
+ { pci_dev_specific_reset, .name = "device_specific" },
+ { pci_dev_acpi_reset, .name = "acpi" },
+ { pcie_reset_flr, .name = "flr" },
+ { pci_af_flr, .name = "af_flr" },
+ { pci_pm_reset, .name = "pm" },
+ { pci_reset_bus_function, .name = "bus" },
+};
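Given this table, a device whose reset_methods[] holds { 3, 6, 0 } would try "flr" (index 3) first and then "bus" (index 6), with the zero terminating the list; index 0 is deliberately an empty slot so that 0 can never name a real method.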
+
+static ssize_t reset_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len = 0;
+ int i, m;
+
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = pdev->reset_methods[i];
+ if (!m)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
+ pci_reset_fn_methods[m].name);
+ }
+
+ if (len)
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static int reset_method_lookup(const char *name)
+{
+ int m;
+
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ if (sysfs_streq(name, pci_reset_fn_methods[m].name))
+ return m;
+ }
+
+ return 0; /* not found */
+}
+
+static ssize_t reset_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *options, *name;
+ int m, n;
+ u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
+
+ if (sysfs_streq(buf, "")) {
+ pdev->reset_methods[0] = 0;
+ pci_warn(pdev, "All device reset methods disabled by user\n");
+ return count;
+ }
+
+ if (sysfs_streq(buf, "default")) {
+ pci_init_reset_methods(pdev);
+ return count;
+ }
+
+ options = kstrndup(buf, count, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ n = 0;
+ while ((name = strsep(&options, " ")) != NULL) {
+ if (sysfs_streq(name, ""))
+ continue;
+
+ name = strim(name);
+
+ m = reset_method_lookup(name);
+ if (!m) {
+ pci_err(pdev, "Invalid reset method '%s'\n", name);
+ goto error;
+ }
+
+ if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
+ pci_err(pdev, "Unsupported reset method '%s'\n", name);
+ goto error;
+ }
+
+ if (n == PCI_NUM_RESET_METHODS - 1) {
+ pci_err(pdev, "Too many reset methods\n");
+ goto error;
+ }
+
+ reset_methods[n++] = m;
+ }
+
+ reset_methods[n] = 0;
+
+ /* Warn if dev-specific supported but not highest priority */
+ if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
+ reset_methods[0] != 1)
+ pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user\n");
+ memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
+ kfree(options);
+ return count;
+
+error:
+ /* Leave previous methods unchanged */
+ kfree(options);
+ return -EINVAL;
+}
+static DEVICE_ATTR_RW(reset_method);
+
+static struct attribute *pci_dev_reset_method_attrs[] = {
+ &dev_attr_reset_method.attr,
+ NULL,
+};
+
+static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (!pci_reset_supported(pdev))
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group pci_dev_reset_method_attr_group = {
+ .attrs = pci_dev_reset_method_attrs,
+ .is_visible = pci_dev_reset_method_attr_is_visible,
+};
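Taken together with the show/store handlers above (paths illustrative): reading /sys/bus/pci/devices/<BDF>/reset_method prints the enabled methods in priority order, e.g. "flr bus"; writing a space-separated subset restricts or reorders them, writing "default" re-probes everything via pci_init_reset_methods(), and writing an empty string disables resets for the device entirely.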
+
/**
* __pci_reset_function_locked - reset a PCI device function while holding
* the @dev mutex lock.
@@ -5143,66 +5288,64 @@ static void pci_dev_restore(struct pci_dev *dev)
*/
int __pci_reset_function_locked(struct pci_dev *dev)
{
- int rc;
+ int i, m, rc = -ENOTTY;
might_sleep();
/*
- * A reset method returns -ENOTTY if it doesn't support this device
- * and we should try the next method.
+ * A reset method returns -ENOTTY if it doesn't support this device and
+ * we should try the next method.
*
- * If it returns 0 (success), we're finished. If it returns any
- * other error, we're also finished: this indicates that further
- * reset mechanisms might be broken on the device.
+ * If it returns 0 (success), we're finished. If it returns any other
+ * error, we're also finished: this indicates that further reset
+ * mechanisms might be broken on the device.
*/
- rc = pci_dev_specific_reset(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- if (pcie_has_flr(dev)) {
- rc = pcie_flr(dev);
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = dev->reset_methods[i];
+ if (!m)
+ return -ENOTTY;
+
+ rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
+ if (!rc)
+ return 0;
if (rc != -ENOTTY)
return rc;
}
- rc = pci_af_flr(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- rc = pci_pm_reset(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- return pci_reset_bus_function(dev, 0);
+
+ return -ENOTTY;
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
/**
- * pci_probe_reset_function - check whether the device can be safely reset
- * @dev: PCI device to reset
+ * pci_init_reset_methods - check whether the device can be safely reset
+ * and store the supported reset mechanisms.
+ * @dev: PCI device to check for reset mechanisms
*
* Some devices allow an individual function to be reset without affecting
- * other functions in the same device. The PCI device must be responsive
- * to PCI config space in order to use this function.
+ * other functions in the same device. The PCI device must be in a
+ * power state between D0 and D3hot.
*
- * Returns 0 if the device function can be reset or negative if the
- * device doesn't support resetting a single function.
+ * Stores the reset mechanisms supported by the device in the reset_methods
+ * byte array, which is a member of struct pci_dev.
*/
-int pci_probe_reset_function(struct pci_dev *dev)
+void pci_init_reset_methods(struct pci_dev *dev)
{
- int rc;
+ int m, i, rc;
+
+ BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
might_sleep();
- rc = pci_dev_specific_reset(dev, 1);
- if (rc != -ENOTTY)
- return rc;
- if (pcie_has_flr(dev))
- return 0;
- rc = pci_af_flr(dev, 1);
- if (rc != -ENOTTY)
- return rc;
- rc = pci_pm_reset(dev, 1);
- if (rc != -ENOTTY)
- return rc;
+ i = 0;
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
+ if (!rc)
+ dev->reset_methods[i++] = m;
+ else if (rc != -ENOTTY)
+ break;
+ }
- return pci_reset_bus_function(dev, 1);
+ dev->reset_methods[i] = 0;
}
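Driver-side usage after this conversion stays simple; a sketch, with pci_reset_function() walking the per-device method list populated here:

	/* pci_reset_supported() replaces the old dev->reset_fn flag. */
	if (pci_reset_supported(pdev)) {
		rc = pci_reset_function(pdev);
		if (rc)
			pci_warn(pdev, "reset failed: %d\n", rc);
	}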
/**
@@ -5225,7 +5368,7 @@ int pci_reset_function(struct pci_dev *dev)
{
int rc;
- if (!dev->reset_fn)
+ if (!pci_reset_supported(dev))
return -ENOTTY;
pci_dev_lock(dev);
@@ -5261,7 +5404,7 @@ int pci_reset_function_locked(struct pci_dev *dev)
{
int rc;
- if (!dev->reset_fn)
+ if (!pci_reset_supported(dev))
return -ENOTTY;
pci_dev_save_and_disable(dev);
@@ -5284,7 +5427,7 @@ int pci_try_reset_function(struct pci_dev *dev)
{
int rc;
- if (!dev->reset_fn)
+ if (!pci_reset_supported(dev))
return -ENOTTY;
if (!pci_dev_trylock(dev))
@@ -5512,7 +5655,7 @@ static void pci_slot_restore_locked(struct pci_slot *slot)
}
}
-static int pci_slot_reset(struct pci_slot *slot, int probe)
+static int pci_slot_reset(struct pci_slot *slot, bool probe)
{
int rc;
@@ -5540,7 +5683,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
*/
int pci_probe_reset_slot(struct pci_slot *slot)
{
- return pci_slot_reset(slot, 1);
+ return pci_slot_reset(slot, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
@@ -5563,14 +5706,14 @@ static int __pci_reset_slot(struct pci_slot *slot)
{
int rc;
- rc = pci_slot_reset(slot, 1);
+ rc = pci_slot_reset(slot, PCI_RESET_PROBE);
if (rc)
return rc;
if (pci_slot_trylock(slot)) {
pci_slot_save_and_disable_locked(slot);
might_sleep();
- rc = pci_reset_hotplug_slot(slot->hotplug, 0);
+ rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
pci_slot_restore_locked(slot);
pci_slot_unlock(slot);
} else
@@ -5579,7 +5722,7 @@ static int __pci_reset_slot(struct pci_slot *slot)
return rc;
}
-static int pci_bus_reset(struct pci_bus *bus, int probe)
+static int pci_bus_reset(struct pci_bus *bus, bool probe)
{
int ret;
@@ -5625,14 +5768,14 @@ int pci_bus_error_reset(struct pci_dev *bridge)
goto bus_reset;
list_for_each_entry(slot, &bus->slots, list)
- if (pci_slot_reset(slot, 0))
+ if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
goto bus_reset;
mutex_unlock(&pci_slot_mutex);
return 0;
bus_reset:
mutex_unlock(&pci_slot_mutex);
- return pci_bus_reset(bridge->subordinate, 0);
+ return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
}
/**
@@ -5643,7 +5786,7 @@ bus_reset:
*/
int pci_probe_reset_bus(struct pci_bus *bus)
{
- return pci_bus_reset(bus, 1);
+ return pci_bus_reset(bus, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
@@ -5657,7 +5800,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
{
int rc;
- rc = pci_bus_reset(bus, 1);
+ rc = pci_bus_reset(bus, PCI_RESET_PROBE);
if (rc)
return rc;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 2f52110cac97..1cce56c2aea0 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -33,10 +33,32 @@ enum pci_mmap_api {
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
enum pci_mmap_api mmap_api);
-int pci_probe_reset_function(struct pci_dev *dev);
+bool pci_reset_supported(struct pci_dev *dev);
+void pci_init_reset_methods(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);
+struct pci_cap_saved_data {
+ u16 cap_nr;
+ bool cap_extended;
+ unsigned int size;
+ u32 data[];
+};
+
+struct pci_cap_saved_state {
+ struct hlist_node next;
+ struct pci_cap_saved_data cap;
+};
+
+void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+void pci_free_cap_save_buffers(struct pci_dev *dev);
+int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
+int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
+ u16 cap, unsigned int size);
+struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
+struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
+ u16 cap);
+
#define PCI_PM_D2_DELAY 200 /* usec; see PCIe r4.0, sec 5.9.1 */
#define PCI_PM_D3HOT_WAIT 10 /* msec */
#define PCI_PM_D3COLD_WAIT 100 /* msec */
@@ -100,8 +122,6 @@ void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_msi_init(struct pci_dev *dev);
void pci_msix_init(struct pci_dev *dev);
-void pci_allocate_cap_save_buffers(struct pci_dev *dev);
-void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
@@ -604,13 +624,18 @@ static inline void pci_ptm_init(struct pci_dev *dev) { }
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
- int (*reset)(struct pci_dev *dev, int probe);
+ int (*reset)(struct pci_dev *dev, bool probe);
+};
+
+struct pci_reset_fn_method {
+ int (*reset_fn)(struct pci_dev *pdev, bool probe);
+ char *name;
};
#ifdef CONFIG_PCI_QUIRKS
-int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+int pci_dev_specific_reset(struct pci_dev *dev, bool probe);
#else
-static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+static inline int pci_dev_specific_reset(struct pci_dev *dev, bool probe)
{
return -ENOTTY;
}
@@ -698,7 +723,15 @@ static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL
#ifdef CONFIG_ACPI
int pci_acpi_program_hp_params(struct pci_dev *dev);
extern const struct attribute_group pci_dev_acpi_attr_group;
+void pci_set_acpi_fwnode(struct pci_dev *dev);
+int pci_dev_acpi_reset(struct pci_dev *dev, bool probe);
#else
+static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
+{
+ return -ENOTTY;
+}
+
+static inline void pci_set_acpi_fwnode(struct pci_dev *dev) {}
static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
{
return -ENODEV;
@@ -709,4 +742,6 @@ static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
extern const struct attribute_group aspm_ctrl_attr_group;
#endif
+extern const struct attribute_group pci_dev_reset_method_attr_group;
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index df4ba9b384c2..9784fdcf3006 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1407,13 +1407,11 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
}
if (type == PCI_EXP_TYPE_RC_EC || type == PCI_EXP_TYPE_RC_END) {
- if (pcie_has_flr(dev)) {
- rc = pcie_flr(dev);
- pci_info(dev, "has been reset (%d)\n", rc);
- } else {
- pci_info(dev, "not reset (no FLR support)\n");
- rc = -ENOTTY;
- }
+ rc = pcie_reset_flr(dev, PCI_RESET_DO_RESET);
+ if (!rc)
+ pci_info(dev, "has been reset\n");
+ else
+ pci_info(dev, "not reset (no FLR support: %d)\n", rc);
} else {
rc = pci_bus_error_reset(dev);
pci_info(dev, "%s Port link has been reset (%d)\n",
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e1fed6649c41..3ee63968deaa 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -257,8 +257,13 @@ static int get_port_device_capability(struct pci_dev *dev)
services |= PCIE_PORT_SERVICE_DPC;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- services |= PCIE_PORT_SERVICE_BWNOTIF;
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
+ u32 linkcap;
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
+ if (linkcap & PCI_EXP_LNKCAP_LBNC)
+ services |= PCIE_PORT_SERVICE_BWNOTIF;
+ }
return services;
}
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 8a4ad974c5ac..368a254e3124 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -60,10 +60,8 @@ void pci_save_ptm_state(struct pci_dev *dev)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
- if (!save_state) {
- pci_err(dev, "no suspend buffer for PTM\n");
+ if (!save_state)
return;
- }
cap = (u16 *)&save_state->cap.data[0];
pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 79177ac37880..d9fc02a71baa 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -19,6 +19,7 @@
#include <linux/hypervisor.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
+#include <linux/bitfield.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -594,6 +595,7 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_pme = 1;
bridge->native_ltr = 1;
bridge->native_dpc = 1;
+ bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
device_initialize(&bridge->dev);
}
@@ -828,11 +830,15 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
struct irq_domain *d;
+ /* If the host bridge driver sets an MSI domain on the bridge, use it */
+ d = dev_get_msi_domain(bus->bridge);
+
/*
* Any firmware interface that can resolve the msi_domain
* should be called from here.
*/
- d = pci_host_bridge_of_msi_domain(bus);
+ if (!d)
+ d = pci_host_bridge_of_msi_domain(bus);
if (!d)
d = pci_host_bridge_acpi_msi_domain(bus);
@@ -898,7 +904,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
bus->ops = bridge->ops;
bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
- bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+ if (bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
+ bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+ else
+ bus->domain_nr = bridge->domain_nr;
#endif
b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
@@ -1498,8 +1507,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
- pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
- pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+ pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
+ pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);
parent = pci_upstream_bridge(pdev);
if (!parent)
@@ -1809,6 +1818,9 @@ int pci_setup_device(struct pci_dev *dev)
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
+ pci_set_of_node(dev);
+ pci_set_acpi_fwnode(dev);
+
pci_dev_assign_slot(dev);
/*
@@ -1946,6 +1958,7 @@ int pci_setup_device(struct pci_dev *dev)
default: /* unknown header */
pci_err(dev, "unknown header type %02x, ignoring device\n",
dev->hdr_type);
+ pci_release_of_node(dev);
return -EIO;
bad:
@@ -2225,7 +2238,6 @@ static void pci_release_capabilities(struct pci_dev *dev)
{
pci_aer_exit(dev);
pci_rcec_exit(dev);
- pci_vpd_release(dev);
pci_iov_release(dev);
pci_free_cap_save_buffers(dev);
}
@@ -2374,10 +2386,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
dev->vendor = l & 0xffff;
dev->device = (l >> 16) & 0xffff;
- pci_set_of_node(dev);
-
if (pci_setup_device(dev)) {
- pci_release_of_node(dev);
pci_bus_put(dev->bus);
kfree(dev);
return NULL;
@@ -2428,9 +2437,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_rcec_init(dev); /* Root Complex Event Collector */
pcie_report_downtraining(dev);
-
- if (pci_probe_reset_function(dev) == 0)
- dev->reset_fn = 1;
+ pci_init_reset_methods(dev);
}
/*
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index d32fbfc93ea9..cb18f8a13ab6 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -83,6 +83,7 @@ static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
buf += 4;
pos += 4;
cnt -= 4;
+ cond_resched();
}
if (cnt >= 2) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ab3de1551b50..e5089af8ad90 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1822,6 +1822,45 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
/*
+ * HiSilicon KunPeng920 and KunPeng930 have devices that appear as PCI but
+ * are actually on the AMBA bus. These fake PCI devices can support SVA via
+ * the SMMU stall feature, by setting the dma-can-stall property on ACPI
+ * platforms.
+ *
+ * Normally stalling must not be enabled for PCI devices, since it would
+ * break the PCI requirement for free-flowing writes and may lead to
+ * deadlock. We expect PCI devices to support ATS and PRI if they want to
+ * be fault-tolerant, so there's no ACPI binding to describe anything else,
+ * even when a "PCI" device turns out to be a regular old SoC device
+ * dressed up as an RCiEP, to which the normal rules don't apply.
+ */
+static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
+{
+ struct property_entry properties[] = {
+ PROPERTY_ENTRY_BOOL("dma-can-stall"),
+ {},
+ };
+
+ if (pdev->revision != 0x21 && pdev->revision != 0x30)
+ return;
+
+ pdev->pasid_no_tlp = 1;
+
+ /*
+ * Set the dma-can-stall property on ACPI platforms. Device tree
+ * can set it directly.
+ */
+ if (!pdev->dev.of_node &&
+ device_add_properties(&pdev->dev, properties))
+ pci_warn(pdev, "could not add stall property\n");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
+
+/*
* It's possible for the MSI to get corrupted if SHPC and ACPI are used
* together on certain PXH-based systems.
*/
@@ -3235,12 +3274,13 @@ static void fixup_mpss_256(struct pci_dev *dev)
{
dev->pcie_mpss = 1; /* 256 bytes */
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
/*
* Intel 5000 and 5100 Memory controllers have an erratum with read completion
@@ -3703,7 +3743,7 @@ DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
* reset a single function if other methods (e.g. FLR, PM D0->D3) are
* not available.
*/
-static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
+static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, bool probe)
{
/*
* http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
@@ -3725,7 +3765,7 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
#define NSDE_PWR_STATE 0xd0100
#define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */
-static int reset_ivb_igd(struct pci_dev *dev, int probe)
+static int reset_ivb_igd(struct pci_dev *dev, bool probe)
{
void __iomem *mmio_base;
unsigned long timeout;
@@ -3768,7 +3808,7 @@ reset_complete:
}
/* Device-specific reset method for Chelsio T4-based adapters */
-static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
+static int reset_chelsio_generic_dev(struct pci_dev *dev, bool probe)
{
u16 old_command;
u16 msix_flags;
@@ -3846,14 +3886,14 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
* Chapter 3: NVMe control registers
* Chapter 7.3: Reset behavior
*/
-static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
+static int nvme_disable_and_flr(struct pci_dev *dev, bool probe)
{
void __iomem *bar;
u16 cmd;
u32 cfg;
if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
- !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
+ pcie_reset_flr(dev, PCI_RESET_PROBE) || !pci_resource_start(dev, 0))
return -ENOTTY;
if (probe)
@@ -3920,15 +3960,12 @@ static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
* device too soon after FLR. A 250ms delay after FLR has heuristically
* proven to produce reliably working results for device assignment cases.
*/
-static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
+static int delay_250ms_after_flr(struct pci_dev *dev, bool probe)
{
- if (!pcie_has_flr(dev))
- return -ENOTTY;
-
if (probe)
- return 0;
+ return pcie_reset_flr(dev, PCI_RESET_PROBE);
- pcie_flr(dev);
+ pcie_reset_flr(dev, PCI_RESET_DO_RESET);
msleep(250);
@@ -3943,7 +3980,7 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
-static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+static int reset_hinic_vf_dev(struct pci_dev *pdev, bool probe)
{
unsigned long timeout;
void __iomem *bar;
@@ -4020,7 +4057,7 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
* because when a host assigns a device to a guest VM, the host may need
* to reset the device but probably doesn't have a driver for it.
*/
-int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+int pci_dev_specific_reset(struct pci_dev *dev, bool probe)
{
const struct pci_dev_reset_methods *i;
@@ -4615,6 +4652,18 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
+/*
+ * Each of these NXP Root Ports is in a Root Complex with a unique segment
+ * number and does provide isolation features to disable peer transactions
+ * and validate bus numbers in requests, but does not provide an ACS
+ * capability.
+ */
+static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
{
if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
@@ -4841,6 +4890,10 @@ static const struct pci_dev_acs_enabled {
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
/* Cavium ThunderX */
{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
+ /* Cavium multi-function devices */
+ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
/* APM X-Gene */
{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
/* Ampere Computing */
@@ -4861,6 +4914,39 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
+ /* NXP root ports, xx=16, 12, or 08 cores */
+ /* LX2xx0A : without security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
+ /* LX2xx0C : security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
+ /* LX2xx0E : security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
+ /* LX2xx0N : without security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
+ /* LX2xx2A : without security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
+ /* LX2xx2C : security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
+ /* LX2xx2E : security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
+ /* LX2xx2N : without security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
/* Zhaoxin Root/Downstream Ports */
{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
{ 0 }
@@ -5032,7 +5118,7 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_CR);
ctrl |= (cap & PCI_ACS_UF);
- if (dev->external_facing || dev->untrusted)
+ if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
ctrl |= (cap & PCI_ACS_TB);
pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
@@ -5630,7 +5716,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
pdev->subsystem_device != 0x222e ||
- !pdev->reset_fn)
+ !pci_reset_supported(pdev))
return;
if (pci_enable_device_mem(pdev))
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index dd12c2fcc7dc..4c54c75050dc 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,7 +19,6 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (pci_dev_is_added(dev)) {
- dev->reset_fn = 0;
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 8b003c890b87..61a6fe3cde21 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -19,11 +19,12 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
u8 byte;
u16 word;
u32 dword;
- long err;
- int cfg_ret;
+ int err, cfg_ret;
+ err = -EPERM;
+ dev = NULL;
if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ goto error;
err = -ENODEV;
dev = pci_get_domain_bus_and_slot(0, bus, dfn);
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 26bf7c877de5..25557b272a4f 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -9,116 +9,94 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
+#include <asm/unaligned.h>
#include "pci.h"
-/* VPD access through PCI 2.2+ VPD capability */
+#define PCI_VPD_LRDT_TAG_SIZE 3
+#define PCI_VPD_SRDT_LEN_MASK 0x07
+#define PCI_VPD_SRDT_TAG_SIZE 1
+#define PCI_VPD_STIN_END 0x0f
+#define PCI_VPD_INFO_FLD_HDR_SIZE 3
-struct pci_vpd_ops {
- ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
- ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
-};
+static u16 pci_vpd_lrdt_size(const u8 *lrdt)
+{
+ return get_unaligned_le16(lrdt + 1);
+}
-struct pci_vpd {
- const struct pci_vpd_ops *ops;
- struct mutex lock;
- unsigned int len;
- u16 flag;
- u8 cap;
- unsigned int busy:1;
- unsigned int valid:1;
-};
+static u8 pci_vpd_srdt_tag(const u8 *srdt)
+{
+ return *srdt >> 3;
+}
-static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
+static u8 pci_vpd_srdt_size(const u8 *srdt)
{
- return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ return *srdt & PCI_VPD_SRDT_LEN_MASK;
}
-/**
- * pci_read_vpd - Read one entry from Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to read
- * @buf: pointer to where to store result
- */
-ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+static u8 pci_vpd_info_field_size(const u8 *info_field)
{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->read(dev, pos, count, buf);
+ return info_field[2];
}
-EXPORT_SYMBOL(pci_read_vpd);
-/**
- * pci_write_vpd - Write entry to Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to write
- * @buf: buffer containing write data
- */
-ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+/* VPD access through PCI 2.2+ VPD capability */
+
+static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->write(dev, pos, count, buf);
+ return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}
-EXPORT_SYMBOL(pci_write_vpd);
-#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
+#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
+#define PCI_VPD_SZ_INVALID UINT_MAX
/**
* pci_vpd_size - determine actual size of Vital Product Data
* @dev: pci device struct
- * @old_size: current assumed size, also maximum allowed size
*/
-static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
+static size_t pci_vpd_size(struct pci_dev *dev)
{
- size_t off = 0;
- unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */
+ size_t off = 0, size;
+ unsigned char tag, header[1+2]; /* 1 byte tag, 2 bytes length */
- while (off < old_size && pci_read_vpd(dev, off, 1, header) == 1) {
- unsigned char tag;
+ /* Assume the maximum size first, otherwise the following reads would fail. */
+ dev->vpd.len = PCI_VPD_MAX_SIZE;
- if (!header[0] && !off) {
- pci_info(dev, "Invalid VPD tag 00, assume missing optional VPD EPROM\n");
- return 0;
- }
+ while (pci_read_vpd(dev, off, 1, header) == 1) {
+ size = 0;
+
+ if (off == 0 && (header[0] == 0x00 || header[0] == 0xff))
+ goto error;
if (header[0] & PCI_VPD_LRDT) {
/* Large Resource Data Type Tag */
- tag = pci_vpd_lrdt_tag(header);
- /* Only read length from known tag items */
- if ((tag == PCI_VPD_LTIN_ID_STRING) ||
- (tag == PCI_VPD_LTIN_RO_DATA) ||
- (tag == PCI_VPD_LTIN_RW_DATA)) {
- if (pci_read_vpd(dev, off+1, 2,
- &header[1]) != 2) {
- pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
- tag, off + 1);
- return 0;
- }
- off += PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(header);
+ if (pci_read_vpd(dev, off + 1, 2, &header[1]) != 2) {
+ pci_warn(dev, "failed VPD read at offset %zu\n",
+ off + 1);
+ return off ?: PCI_VPD_SZ_INVALID;
}
+ size = pci_vpd_lrdt_size(header);
+ if (off + size > PCI_VPD_MAX_SIZE)
+ goto error;
+
+ off += PCI_VPD_LRDT_TAG_SIZE + size;
} else {
/* Short Resource Data Type Tag */
- off += PCI_VPD_SRDT_TAG_SIZE +
- pci_vpd_srdt_size(header);
tag = pci_vpd_srdt_tag(header);
- }
-
- if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
- return off;
+ size = pci_vpd_srdt_size(header);
+ if (off + size > PCI_VPD_MAX_SIZE)
+ goto error;
- if ((tag != PCI_VPD_LTIN_ID_STRING) &&
- (tag != PCI_VPD_LTIN_RO_DATA) &&
- (tag != PCI_VPD_LTIN_RW_DATA)) {
- pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
- (header[0] & PCI_VPD_LRDT) ? "large" : "short",
- tag, off);
- return 0;
+ off += PCI_VPD_SRDT_TAG_SIZE + size;
+ if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
+ return off;
}
}
- return 0;
+ return off;
+
+error:
+ pci_info(dev, "invalid VPD tag %#04x (size %zu) at offset %zu%s\n",
+ header[0], size, off, off == 0 ?
+ "; assume missing optional EEPROM" : "");
+ return off ?: PCI_VPD_SZ_INVALID;
}
/*
@@ -126,33 +104,26 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
* This code has to spin since there is no other notification from the PCI
* hardware. Since the VPD is often implemented by serial attachment to an
* EEPROM, it may take many milliseconds to complete.
+ * @set: if true, wait for the flag to be set; otherwise wait for it to be cleared
*
* Returns 0 on success, negative values indicate error.
*/
-static int pci_vpd_wait(struct pci_dev *dev)
+static int pci_vpd_wait(struct pci_dev *dev, bool set)
{
- struct pci_vpd *vpd = dev->vpd;
+ struct pci_vpd *vpd = &dev->vpd;
unsigned long timeout = jiffies + msecs_to_jiffies(125);
unsigned long max_sleep = 16;
u16 status;
int ret;
- if (!vpd->busy)
- return 0;
-
do {
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
if (ret < 0)
return ret;
- if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
- vpd->busy = 0;
+ if (!!(status & PCI_VPD_ADDR_F) == set)
return 0;
- }
-
- if (fatal_signal_pending(current))
- return -EINTR;
if (time_after(jiffies, timeout))
break;
@@ -169,22 +140,17 @@ static int pci_vpd_wait(struct pci_dev *dev)
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
void *arg)
{
- struct pci_vpd *vpd = dev->vpd;
- int ret;
+ struct pci_vpd *vpd = &dev->vpd;
+ int ret = 0;
loff_t end = pos + count;
u8 *buf = arg;
+ if (!vpd->cap)
+ return -ENODEV;
+
if (pos < 0)
return -EINVAL;
- if (!vpd->valid) {
- vpd->valid = 1;
- vpd->len = pci_vpd_size(dev, vpd->len);
- }
-
- if (vpd->len == 0)
- return -EIO;
-
if (pos > vpd->len)
return 0;
@@ -196,21 +162,20 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- goto out;
-
while (pos < end) {
u32 val;
unsigned int i, skip;
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
pos & ~3);
if (ret < 0)
break;
- vpd->busy = 1;
- vpd->flag = PCI_VPD_ADDR_F;
- ret = pci_vpd_wait(dev);
+ ret = pci_vpd_wait(dev, true);
if (ret < 0)
break;
@@ -228,7 +193,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
val >>= 8;
}
}
-out:
+
mutex_unlock(&vpd->lock);
return ret ? ret : count;
}
@@ -236,41 +201,26 @@ out:
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
const void *arg)
{
- struct pci_vpd *vpd = dev->vpd;
+ struct pci_vpd *vpd = &dev->vpd;
const u8 *buf = arg;
loff_t end = pos + count;
int ret = 0;
+ if (!vpd->cap)
+ return -ENODEV;
+
if (pos < 0 || (pos & 3) || (count & 3))
return -EINVAL;
- if (!vpd->valid) {
- vpd->valid = 1;
- vpd->len = pci_vpd_size(dev, vpd->len);
- }
-
- if (vpd->len == 0)
- return -EIO;
-
if (end > vpd->len)
return -EINVAL;
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- goto out;
-
while (pos < end) {
- u32 val;
-
- val = *buf++;
- val |= *buf++ << 8;
- val |= *buf++ << 16;
- val |= *buf++ << 24;
-
- ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
+ ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA,
+ get_unaligned_le32(buf));
if (ret < 0)
break;
ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
@@ -278,85 +228,28 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
if (ret < 0)
break;
- vpd->busy = 1;
- vpd->flag = 0;
- ret = pci_vpd_wait(dev);
+ ret = pci_vpd_wait(dev, false);
if (ret < 0)
break;
+ buf += sizeof(u32);
pos += sizeof(u32);
}
-out:
+
mutex_unlock(&vpd->lock);
return ret ? ret : count;
}
-static const struct pci_vpd_ops pci_vpd_ops = {
- .read = pci_vpd_read,
- .write = pci_vpd_write,
-};
-
-static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
- void *arg)
-{
- struct pci_dev *tdev = pci_get_func0_dev(dev);
- ssize_t ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_read_vpd(tdev, pos, count, arg);
- pci_dev_put(tdev);
- return ret;
-}
-
-static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
- const void *arg)
-{
- struct pci_dev *tdev = pci_get_func0_dev(dev);
- ssize_t ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_write_vpd(tdev, pos, count, arg);
- pci_dev_put(tdev);
- return ret;
-}
-
-static const struct pci_vpd_ops pci_vpd_f0_ops = {
- .read = pci_vpd_f0_read,
- .write = pci_vpd_f0_write,
-};
-
void pci_vpd_init(struct pci_dev *dev)
{
- struct pci_vpd *vpd;
- u8 cap;
+ dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ mutex_init(&dev->vpd.lock);
- cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
- if (!cap)
- return;
-
- vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
- if (!vpd)
- return;
-
- vpd->len = PCI_VPD_MAX_SIZE;
- if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
- vpd->ops = &pci_vpd_f0_ops;
- else
- vpd->ops = &pci_vpd_ops;
- mutex_init(&vpd->lock);
- vpd->cap = cap;
- vpd->busy = 0;
- vpd->valid = 0;
- dev->vpd = vpd;
-}
+ if (!dev->vpd.len)
+ dev->vpd.len = pci_vpd_size(dev);
-void pci_vpd_release(struct pci_dev *dev)
-{
- kfree(dev->vpd);
+ if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+ dev->vpd.cap = 0;
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
@@ -388,7 +281,7 @@ static umode_t vpd_attr_is_visible(struct kobject *kobj,
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- if (!pdev->vpd)
+ if (!pdev->vpd.cap)
return 0;
return a->attr.mode;
@@ -399,23 +292,63 @@ const struct attribute_group pci_dev_vpd_attr_group = {
.is_bin_visible = vpd_attr_is_visible,
};
-int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt)
+void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
+{
+ unsigned int len = dev->vpd.len;
+ void *buf;
+ int cnt;
+
+ if (!dev->vpd.cap)
+ return ERR_PTR(-ENODEV);
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cnt = pci_read_vpd(dev, 0, len, buf);
+ if (cnt != len) {
+ kfree(buf);
+ return ERR_PTR(-EIO);
+ }
+
+ if (size)
+ *size = len;
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_alloc);
+
+static int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt, unsigned int *size)
{
int i = 0;
/* look for LRDT tags only, end tag is the only SRDT tag */
while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) {
- if (buf[i] == rdt)
+ unsigned int lrdt_len = pci_vpd_lrdt_size(buf + i);
+ u8 tag = buf[i];
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ if (tag == rdt) {
+ if (i + lrdt_len > len)
+ lrdt_len = len - i;
+ if (size)
+ *size = lrdt_len;
return i;
+ }
- i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf + i);
+ i += lrdt_len;
}
return -ENOENT;
}
-EXPORT_SYMBOL_GPL(pci_vpd_find_tag);
-int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
+int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size)
+{
+ return pci_vpd_find_tag(buf, len, PCI_VPD_LRDT_ID_STRING, size);
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_id_string);
+
+static int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
unsigned int len, const char *kw)
{
int i;
@@ -431,7 +364,106 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
return -ENOENT;
}
-EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
+
+/**
+ * pci_read_vpd - Read one entry from Vital Product Data
+ * @dev: PCI device struct
+ * @pos: offset in VPD space
+ * @count: number of bytes to read
+ * @buf: pointer to where to store result
+ */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+{
+ ssize_t ret;
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ dev = pci_get_func0_dev(dev);
+ if (!dev)
+ return -ENODEV;
+
+ ret = pci_vpd_read(dev, pos, count, buf);
+ pci_dev_put(dev);
+ return ret;
+ }
+
+ return pci_vpd_read(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_read_vpd);
+
+/**
+ * pci_write_vpd - Write entry to Vital Product Data
+ * @dev: PCI device struct
+ * @pos: offset in VPD space
+ * @count: number of bytes to write
+ * @buf: buffer containing write data
+ */
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+{
+ ssize_t ret;
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ dev = pci_get_func0_dev(dev);
+ if (!dev)
+ return -ENODEV;
+
+ ret = pci_vpd_write(dev, pos, count, buf);
+ pci_dev_put(dev);
+ return ret;
+ }
+
+ return pci_vpd_write(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_write_vpd);
+
+int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
+ const char *kw, unsigned int *size)
+{
+ int ro_start, infokw_start;
+ unsigned int ro_len, infokw_size;
+
+ ro_start = pci_vpd_find_tag(buf, len, PCI_VPD_LRDT_RO_DATA, &ro_len);
+ if (ro_start < 0)
+ return ro_start;
+
+ infokw_start = pci_vpd_find_info_keyword(buf, ro_start, ro_len, kw);
+ if (infokw_start < 0)
+ return infokw_start;
+
+ infokw_size = pci_vpd_info_field_size(buf + infokw_start);
+ infokw_start += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (infokw_start + infokw_size > len)
+ return -EINVAL;
+
+ if (size)
+ *size = infokw_size;
+
+ return infokw_start;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_ro_info_keyword);
+
+int pci_vpd_check_csum(const void *buf, unsigned int len)
+{
+ const u8 *vpd = buf;
+ unsigned int size;
+ u8 csum = 0;
+ int rv_start;
+
+ rv_start = pci_vpd_find_ro_info_keyword(buf, len, PCI_VPD_RO_KEYWORD_CHKSUM, &size);
+ if (rv_start == -ENOENT) /* no checksum in VPD */
+ return 1;
+ else if (rv_start < 0)
+ return rv_start;
+
+ if (!size)
+ return -EINVAL;
+
+ while (rv_start >= 0)
+ csum += vpd[rv_start--];
+
+ return csum ? -EILSEQ : 0;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_check_csum);
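A sketch of the consumer-side flow for the new VPD API; the keyword choice is illustrative and error handling is abbreviated:

	unsigned int size, kw_size;
	int pos, ret;
	u8 *vpd;

	vpd = pci_vpd_alloc(pdev, &size);	/* reads the whole VPD image */
	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	ret = pci_vpd_check_csum(vpd, size);	/* 0 = valid, 1 = no checksum */
	if (ret < 0)
		goto out;

	pos = pci_vpd_find_ro_info_keyword(vpd, size,
					   PCI_VPD_RO_KEYWORD_SERIALNO, &kw_size);
	if (pos >= 0)
		pci_info(pdev, "serial number: %.*s\n", (int)kw_size, vpd + pos);
	ret = 0;
out:
	kfree(vpd);
	return ret;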
#ifdef CONFIG_PCI_QUIRKS
/*
@@ -450,7 +482,7 @@ static void quirk_f0_vpd_link(struct pci_dev *dev)
if (!f0)
return;
- if (f0->vpd && dev->class == f0->class &&
+ if (f0->vpd.cap && dev->class == f0->class &&
dev->vendor == f0->vendor && dev->device == f0->device)
dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
@@ -468,41 +500,27 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
*/
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
- if (dev->vpd) {
- dev->vpd->len = 0;
- pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
- }
+ dev->vpd.len = PCI_VPD_SZ_INVALID;
+ pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
- quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd);
/*
* The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
* device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
*/
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
- PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
-
-static void pci_vpd_set_size(struct pci_dev *dev, size_t len)
-{
- struct pci_vpd *vpd = dev->vpd;
-
- if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE)
- return;
-
- vpd->valid = 1;
- vpd->len = len;
-}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
@@ -522,12 +540,12 @@ static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
* limits.
*/
if (chip == 0x0 && prod >= 0x20)
- pci_vpd_set_size(dev, 8192);
+ dev->vpd.len = 8192;
else if (chip >= 0x4 && func < 0x8)
- pci_vpd_set_size(dev, 2048);
+ dev->vpd.len = 2048;
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
- quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ quirk_chelsio_extend_vpd);
#endif
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 3e491dfb2525..937a14fa7448 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -15,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
+#include <linux/units.h>
#define STM32_USBPHYC_PLL 0x0
#define STM32_USBPHYC_MISC 0x8
@@ -47,7 +48,6 @@
#define PLL_FVCO_MHZ 2880
#define PLL_INFF_MIN_RATE_HZ 19200000
#define PLL_INFF_MAX_RATE_HZ 38400000
-#define HZ_PER_MHZ 1000000L
struct pll_params {
u8 ndiv;
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 41baccba033f..f901d2e43166 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
-cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
+cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index aa7f7aa77297..a7404d69b2d3 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -279,6 +279,15 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
msg->insize = sizeof(struct ec_response_get_protocol_info);
ret = send_command(ec_dev, msg);
+ /*
+ * Send the command again if a timeout occurred.
+ * The Fingerprint MCU (FPMCU) is restarted during system boot, which
+ * introduces a small window in which the FPMCU won't respond to any
+ * messages sent by the kernel. There is no need to wait before the
+ * next attempt, because we have already waited at least EC_MSG_DEADLINE_MS.
+ */
+ if (ret == -ETIMEDOUT)
+ ret = send_command(ec_dev, msg);
if (ret < 0) {
dev_dbg(ec_dev->dev,
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
index 8921f24e83ba..98e37080f760 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -17,6 +17,8 @@
#include <linux/sort.h>
#include <linux/slab.h>
+#include "cros_ec_trace.h"
+
/* Precision of fixed point for the m values from the filter */
#define M_PRECISION BIT(23)
@@ -291,6 +293,7 @@ cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state
state->median_m = 0;
state->median_error = 0;
}
+ trace_cros_ec_sensorhub_filter(state, dx, dy);
}
/**
@@ -427,6 +430,11 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
if (new_timestamp - *current_timestamp > 0)
*current_timestamp = new_timestamp;
}
+ trace_cros_ec_sensorhub_timestamp(in->timestamp,
+ fifo_info->timestamp,
+ fifo_timestamp,
+ *current_timestamp,
+ now);
}
if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) {
@@ -460,6 +468,12 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
/* Regular sample */
out->sensor_id = in->sensor_num;
+ trace_cros_ec_sensorhub_data(in->sensor_num,
+ fifo_info->timestamp,
+ fifo_timestamp,
+ *current_timestamp,
+ now);
+
if (*current_timestamp - now > 0) {
/*
* This fix is needed to overcome the timestamp filter putting
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index f744b21bc655..7e7cfc98657a 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/tracepoint.h>
@@ -70,6 +71,99 @@ TRACE_EVENT(cros_ec_request_done,
__entry->retval)
);
+TRACE_EVENT(cros_ec_sensorhub_timestamp,
+ TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
+ current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sample_timestamp)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sample_timestamp = ec_sample_timestamp;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sample_timestamp,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_data,
+ TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sensor_num)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sensor_num = ec_sensor_num;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sensor_num,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_filter,
+ TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
+ TP_ARGS(state, dx, dy),
+ TP_STRUCT__entry(
+ __field(s64, dx)
+ __field(s64, dy)
+ __field(s64, median_m)
+ __field(s64, median_error)
+ __field(s64, history_len)
+ __field(s64, x)
+ __field(s64, y)
+ ),
+ TP_fast_assign(
+ __entry->dx = dx;
+ __entry->dy = dy;
+ __entry->median_m = state->median_m;
+ __entry->median_error = state->median_error;
+ __entry->history_len = state->history_len;
+ __entry->x = state->x_offset;
+ __entry->y = state->y_offset;
+ ),
+ TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
+ __entry->dx,
+ __entry->dy,
+ __entry->median_m,
+ __entry->median_error,
+ __entry->history_len,
+ __entry->x,
+ __entry->y
+ )
+);
+
#endif /* _CROS_EC_TRACE_H_ */
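A note on cost: TRACE_EVENT() compiles each trace_cros_ec_sensorhub_*() call site down to a static branch, so the hooks added to the ring code above are close to free while tracing is disabled. When assembling the arguments is itself expensive, the generated trace_<event>_enabled() predicate can guard the call; a sketch:

	if (trace_cros_ec_sensorhub_filter_enabled())
		trace_cros_ec_sensorhub_filter(state, dx, dy);

Once the module is loaded, the events should appear under /sys/kernel/tracing/events/cros_ec/ (assuming the default tracefs mount and the cros_ec TRACE_SYSTEM this header declares).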
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 27c068c4c38d..262a891eded3 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -1054,24 +1054,6 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
return 0;
}
-/* Check the EC feature flags to see if TYPEC_* features are supported. */
-static int cros_typec_feature_supported(struct cros_typec_data *typec, enum ec_feature_code feature)
-{
- struct ec_response_get_features resp = {};
- int ret;
-
- ret = cros_typec_ec_command(typec, 0, EC_CMD_GET_FEATURES, NULL, 0,
- &resp, sizeof(resp));
- if (ret < 0) {
- dev_warn(typec->dev,
- "Failed to get features, assuming typec feature=%d unsupported.\n",
- feature);
- return 0;
- }
-
- return resp.flags[feature / 32] & EC_FEATURE_MASK_1(feature);
-}
-
static void cros_typec_port_work(struct work_struct *work)
{
struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
@@ -1113,6 +1095,7 @@ MODULE_DEVICE_TABLE(of, cros_typec_of_match);
static int cros_typec_probe(struct platform_device *pdev)
{
+ struct cros_ec_dev *ec_dev = NULL;
struct device *dev = &pdev->dev;
struct cros_typec_data *typec;
struct ec_response_usb_pd_ports resp;
@@ -1132,10 +1115,10 @@ static int cros_typec_probe(struct platform_device *pdev)
return ret;
}
- typec->typec_cmd_supported = !!cros_typec_feature_supported(typec,
- EC_FEATURE_TYPEC_CMD);
- typec->needs_mux_ack = !!cros_typec_feature_supported(typec,
- EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
+ ec_dev = dev_get_drvdata(&typec->ec->ec->dev);
+ typec->typec_cmd_supported = !!cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
+ typec->needs_mux_ack = !!cros_ec_check_features(ec_dev,
+ EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
ret = cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
&resp, sizeof(resp));
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index c76adedd58c9..aa29841bbb79 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -272,7 +272,7 @@ config PWM_IQS620A
config PWM_JZ4740
tristate "Ingenic JZ47xx PWM support"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
depends on COMMON_CLK
select MFD_SYSCON
help
@@ -284,7 +284,8 @@ config PWM_JZ4740
config PWM_KEEMBAY
tristate "Intel Keem Bay PWM driver"
- depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on COMMON_CLK && HAS_IOMEM
help
The platform driver for Intel Keem Bay PWM controller.
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 35e894f4a379..4527f09a5c50 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(pwmchip_add);
*
* Returns: 0 on success or a negative error code on failure.
*/
-int pwmchip_remove(struct pwm_chip *chip)
+void pwmchip_remove(struct pwm_chip *chip)
{
pwmchip_sysfs_unexport(chip);
@@ -318,8 +318,6 @@ int pwmchip_remove(struct pwm_chip *chip)
free_pwms(chip);
mutex_unlock(&pwm_lock);
-
- return 0;
}
EXPORT_SYMBOL_GPL(pwmchip_remove);
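With the return type gone, callers no longer have a failure path to propagate. A sketch of the resulting remove() shape, using a hypothetical "foo" driver, which the per-driver patches below repeat many times:

static int foo_pwm_remove(struct platform_device *pdev)
{
	struct foo_pwm_chip *foo = platform_get_drvdata(pdev);

	/* Unregister first, then undo probe-time resources in reverse. */
	pwmchip_remove(&foo->chip);
	clk_disable_unprepare(foo->clk);

	return 0;
}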
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index e2a26d9da25b..ad37bc46f272 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -22,14 +22,21 @@
struct ab8500_pwm_chip {
struct pwm_chip chip;
+ unsigned int hwid;
};
+static struct ab8500_pwm_chip *ab8500_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct ab8500_pwm_chip, chip);
+}
+
static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int ret;
u8 reg;
unsigned int higher_val, lower_val;
+ struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip);
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
@@ -37,7 +44,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), 0);
+ 1 << ab8500->hwid, 0);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
@@ -56,7 +63,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
*/
higher_val = ((state->duty_cycle & 0x0300) >> 8);
- reg = AB8500_PWM_OUT_CTRL1_REG + ((chip->base - 1) * 2);
+ reg = AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2);
ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
reg, (u8)lower_val);
@@ -70,7 +77,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), 1 << (chip->base - 1));
+ 1 << ab8500->hwid, 1 << ab8500->hwid);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
@@ -88,6 +95,9 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
struct ab8500_pwm_chip *ab8500;
int err;
+ if (pdev->id < 1 || pdev->id > 31)
+ return dev_err_probe(&pdev->dev, -EINVAL, "Invalid device id %d\n", pdev->id);
+
/*
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
@@ -99,27 +109,13 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
ab8500->chip.dev = &pdev->dev;
ab8500->chip.ops = &ab8500_pwm_ops;
ab8500->chip.npwm = 1;
+ ab8500->hwid = pdev->id - 1;
- err = pwmchip_add(&ab8500->chip);
+ err = devm_pwmchip_add(&pdev->dev, &ab8500->chip);
if (err < 0)
return dev_err_probe(&pdev->dev, err, "Failed to add pwm chip\n");
dev_dbg(&pdev->dev, "pwm probe successful\n");
- platform_set_drvdata(pdev, ab8500);
-
- return 0;
-}
-
-static int ab8500_pwm_remove(struct platform_device *pdev)
-{
- struct ab8500_pwm_chip *ab8500 = platform_get_drvdata(pdev);
- int err;
-
- err = pwmchip_remove(&ab8500->chip);
- if (err < 0)
- return err;
-
- dev_dbg(&pdev->dev, "pwm driver removed\n");
return 0;
}
@@ -129,7 +125,6 @@ static struct platform_driver ab8500_pwm_driver = {
.name = "ab8500-pwm",
},
.probe = ab8500_pwm_probe,
- .remove = ab8500_pwm_remove,
};
module_platform_driver(ab8500_pwm_driver);
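The ab8500 conversion above also shows the devm_pwmchip_add() idiom adopted throughout the rest of this series: registration is tied to the device's lifetime, so the .remove callback and platform_set_drvdata() both disappear. A minimal probe sketch, assuming a hypothetical "foo" chip whose pwm_ops are defined elsewhere:

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct foo_pwm_chip *foo;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->chip.dev = &pdev->dev;
	foo->chip.ops = &foo_pwm_ops;
	foo->chip.npwm = 1;

	/* Chip is unregistered automatically when the device unbinds. */
	return devm_pwmchip_add(&pdev->dev, &foo->chip);
}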
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 4459325d3650..a43b2babc809 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -281,11 +281,8 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
static int atmel_hlcdc_pwm_remove(struct platform_device *pdev)
{
struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&chip->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&chip->chip);
clk_disable_unprepare(chip->hlcdc->periph_clk);
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index bf398f21484d..36f7ea381838 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -503,11 +503,8 @@ err_slow_clk:
static int atmel_tcb_pwm_remove(struct platform_device *pdev)
{
struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
- int err;
- err = pwmchip_remove(&tcbpwm->chip);
- if (err < 0)
- return err;
+ pwmchip_remove(&tcbpwm->chip);
clk_disable_unprepare(tcbpwm->slow_clk);
clk_put(tcbpwm->slow_clk);
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index a8162bae3e8a..e748604403cc 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -84,9 +84,19 @@ struct atmel_pwm_chip {
void __iomem *base;
const struct atmel_pwm_data *data;
- unsigned int updated_pwms;
- /* ISR is cleared when read, ensure only one thread does that */
- struct mutex isr_lock;
+ /*
+ * The hardware supports a mechanism to update a channel's duty cycle at
+ * the end of the currently running period. When such an update is
+ * pending we delay disabling the PWM until the new configuration is
+ * active because otherwise pwm_config(duty_cycle=0); pwm_disable();
+ * might not result in an inactive output.
+ * This bitmask tracks for which channels an update is pending in
+ * hardware.
+ */
+ u32 update_pending;
+
+ /* Protects .update_pending */
+ spinlock_t lock;
};
static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip)
@@ -123,6 +133,64 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
atmel_pwm_writel(chip, base + offset, val);
}
+static void atmel_pwm_update_pending(struct atmel_pwm_chip *chip)
+{
+ /*
+ * Each channel that has its bit in ISR set started a new period since
+ * ISR was cleared and so there is no more update pending. Note that
+ * reading ISR clears it, so this needs to handle all channels so as
+ * not to lose information.
+ */
+ u32 isr = atmel_pwm_readl(chip, PWM_ISR);
+
+ chip->update_pending &= ~isr;
+}
+
+static void atmel_pwm_set_pending(struct atmel_pwm_chip *chip, unsigned int ch)
+{
+ spin_lock(&chip->lock);
+
+ /*
+ * Clear pending flags in hardware because otherwise there might still
+ * be a stale flag in ISR.
+ */
+ atmel_pwm_update_pending(chip);
+
+ chip->update_pending |= (1 << ch);
+
+ spin_unlock(&chip->lock);
+}
+
+static int atmel_pwm_test_pending(struct atmel_pwm_chip *chip, unsigned int ch)
+{
+ int ret = 0;
+
+ spin_lock(&chip->lock);
+
+ if (chip->update_pending & (1 << ch)) {
+ atmel_pwm_update_pending(chip);
+
+ if (chip->update_pending & (1 << ch))
+ ret = 1;
+ }
+
+ spin_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int atmel_pwm_wait_nonpending(struct atmel_pwm_chip *chip, unsigned int ch)
+{
+ unsigned long timeout = jiffies + 2 * HZ;
+ int ret;
+
+ while ((ret = atmel_pwm_test_pending(chip, ch)) &&
+ time_before(jiffies, timeout))
+ usleep_range(10, 100);
+
+ return ret ? -ETIMEDOUT : 0;
+}
+
static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
unsigned long clkrate,
const struct pwm_state *state,
@@ -185,6 +253,7 @@ static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm,
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.duty_upd, cdty);
+ atmel_pwm_set_pending(atmel_pwm, pwm->hwpwm);
}
static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip,
@@ -205,20 +274,8 @@ static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
unsigned long timeout = jiffies + 2 * HZ;
- /*
- * Wait for at least a complete period to have passed before disabling a
- * channel to be sure that CDTY has been updated
- */
- mutex_lock(&atmel_pwm->isr_lock);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
-
- while (!(atmel_pwm->updated_pwms & (1 << pwm->hwpwm)) &&
- time_before(jiffies, timeout)) {
- usleep_range(10, 100);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
- }
+ atmel_pwm_wait_nonpending(atmel_pwm, pwm->hwpwm);
- mutex_unlock(&atmel_pwm->isr_lock);
atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm);
/*
@@ -292,10 +349,6 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
val |= PWM_CMR_CPOL;
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
atmel_pwm_set_cprd_cdty(chip, pwm, cprd, cdty);
- mutex_lock(&atmel_pwm->isr_lock);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
- atmel_pwm->updated_pwms &= ~(1 << pwm->hwpwm);
- mutex_unlock(&atmel_pwm->isr_lock);
atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm);
} else if (cstate.enabled) {
atmel_pwm_disable(chip, pwm, true);
@@ -326,6 +379,9 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
tmp <<= pres;
state->period = DIV64_U64_ROUND_UP(tmp, rate);
+ /* Wait for an updated duty_cycle queued in hardware */
+ atmel_pwm_wait_nonpending(atmel_pwm, pwm->hwpwm);
+
cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.duty);
tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
@@ -416,9 +472,10 @@ static int atmel_pwm_probe(struct platform_device *pdev)
if (!atmel_pwm)
return -ENOMEM;
- mutex_init(&atmel_pwm->isr_lock);
atmel_pwm->data = of_device_get_match_data(&pdev->dev);
- atmel_pwm->updated_pwms = 0;
+
+ atmel_pwm->update_pending = 0;
+ spin_lock_init(&atmel_pwm->lock);
atmel_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pwm->base))
@@ -460,7 +517,6 @@ static int atmel_pwm_remove(struct platform_device *pdev)
pwmchip_remove(&atmel_pwm->chip);
clk_unprepare(atmel_pwm->clk);
- mutex_destroy(&atmel_pwm->isr_lock);
return 0;
}
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 8c85c66ea5c9..64148f5f81d0 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -267,8 +267,6 @@ static int kona_pwmc_probe(struct platform_device *pdev)
if (kp == NULL)
return -ENOMEM;
- platform_set_drvdata(pdev, kp);
-
kp->chip.dev = &pdev->dev;
kp->chip.ops = &kona_pwm_ops;
kp->chip.npwm = 6;
@@ -298,20 +296,13 @@ static int kona_pwmc_probe(struct platform_device *pdev)
clk_disable_unprepare(kp->clk);
- ret = pwmchip_add(&kp->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &kp->chip);
if (ret < 0)
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
}
-static int kona_pwmc_remove(struct platform_device *pdev)
-{
- struct kona_pwmc *kp = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&kp->chip);
-}
-
static const struct of_device_id bcm_kona_pwmc_dt[] = {
{ .compatible = "brcm,kona-pwm" },
{ },
@@ -324,7 +315,6 @@ static struct platform_driver kona_pwmc_driver = {
.of_match_table = bcm_kona_pwmc_dt,
},
.probe = kona_pwmc_probe,
- .remove = kona_pwmc_remove,
};
module_platform_driver(kona_pwmc_driver);
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 8b1d1e7aa856..3b529f82b97c 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -282,12 +282,11 @@ out_clk:
static int brcmstb_pwm_remove(struct platform_device *pdev)
{
struct brcmstb_pwm *p = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&p->chip);
+ pwmchip_remove(&p->chip);
clk_disable_unprepare(p->clk);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 9fffb566af5f..5e29d9c682c3 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -280,7 +280,9 @@ static int cros_ec_pwm_remove(struct platform_device *dev)
struct cros_ec_pwm_device *ec_pwm = platform_get_drvdata(dev);
struct pwm_chip *chip = &ec_pwm->chip;
- return pwmchip_remove(chip);
+ pwmchip_remove(chip);
+
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index fc3cb7d669c6..c45a75e65c86 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -183,27 +183,18 @@ static int ep93xx_pwm_probe(struct platform_device *pdev)
ep93xx_pwm->chip.ops = &ep93xx_pwm_ops;
ep93xx_pwm->chip.npwm = 1;
- ret = pwmchip_add(&ep93xx_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &ep93xx_pwm->chip);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, ep93xx_pwm);
return 0;
}
-static int ep93xx_pwm_remove(struct platform_device *pdev)
-{
- struct ep93xx_pwm *ep93xx_pwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&ep93xx_pwm->chip);
-}
-
static struct platform_driver ep93xx_pwm_driver = {
.driver = {
.name = "ep93xx-pwm",
},
.probe = ep93xx_pwm_probe,
- .remove = ep93xx_pwm_remove,
};
module_platform_driver(ep93xx_pwm_driver);
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 96ccd772280c..0247757f9a72 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -453,7 +453,7 @@ static int fsl_pwm_probe(struct platform_device *pdev)
fpc->chip.ops = &fsl_pwm_ops;
fpc->chip.npwm = 8;
- ret = pwmchip_add(&fpc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &fpc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
@@ -464,13 +464,6 @@ static int fsl_pwm_probe(struct platform_device *pdev)
return fsl_pwm_init(fpc);
}
-static int fsl_pwm_remove(struct platform_device *pdev)
-{
- struct fsl_pwm_chip *fpc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&fpc->chip);
-}
-
#ifdef CONFIG_PM_SLEEP
static int fsl_pwm_suspend(struct device *dev)
{
@@ -552,7 +545,6 @@ static struct platform_driver fsl_pwm_driver = {
.pm = &fsl_pwm_pm_ops,
},
.probe = fsl_pwm_probe,
- .remove = fsl_pwm_remove,
};
module_platform_driver(fsl_pwm_driver);
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index 4a6e9ad3c0ff..333f1b18ff4e 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -248,13 +248,15 @@ static int hibvt_pwm_remove(struct platform_device *pdev)
pwm_chip = platform_get_drvdata(pdev);
+ pwmchip_remove(&pwm_chip->chip);
+
reset_control_assert(pwm_chip->rstc);
msleep(30);
reset_control_deassert(pwm_chip->rstc);
clk_disable_unprepare(pwm_chip->clk);
- return pwmchip_remove(&pwm_chip->chip);
+ return 0;
}
static const struct of_device_id hibvt_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 11b16ecc4f96..f97f82548293 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -326,28 +326,14 @@ err_pm_disable:
static int img_pwm_remove(struct platform_device *pdev)
{
struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
- u32 val;
- unsigned int i;
- int ret;
-
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put(&pdev->dev);
- return ret;
- }
-
- for (i = 0; i < pwm_chip->chip.npwm; i++) {
- val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
- val &= ~BIT(i);
- img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
- }
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
img_pwm_runtime_suspend(&pdev->dev);
- return pwmchip_remove(&pwm_chip->chip);
+ pwmchip_remove(&pwm_chip->chip);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index dbb50493abdd..e5e7b7c339a8 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -382,11 +382,12 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
static int pwm_imx_tpm_remove(struct platform_device *pdev)
{
struct imx_tpm_pwm_chip *tpm = platform_get_drvdata(pdev);
- int ret = pwmchip_remove(&tpm->chip);
+
+ pwmchip_remove(&tpm->chip);
clk_disable_unprepare(tpm->clk);
- return ret;
+ return 0;
}
static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index f6588a96fbd9..ea91a2f81a9f 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -313,8 +313,6 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (imx == NULL)
return -ENOMEM;
- platform_set_drvdata(pdev, imx);
-
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg))
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_ipg),
@@ -342,16 +340,7 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (!(pwmcr & MX3_PWMCR_EN))
pwm_imx27_clk_disable_unprepare(imx);
- return pwmchip_add(&imx->chip);
-}
-
-static int pwm_imx27_remove(struct platform_device *pdev)
-{
- struct pwm_imx27_chip *imx;
-
- imx = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&imx->chip);
+ return devm_pwmchip_add(&pdev->dev, &imx->chip);
}
static struct platform_driver imx_pwm_driver = {
@@ -360,7 +349,6 @@ static struct platform_driver imx_pwm_driver = {
.of_match_table = pwm_imx27_dt_ids,
},
.probe = pwm_imx27_probe,
- .remove = pwm_imx27_remove,
};
module_platform_driver(imx_pwm_driver);
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
index 015f5eba09a1..b66c35074087 100644
--- a/drivers/pwm/pwm-intel-lgm.c
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -176,8 +176,6 @@ static int lgm_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
- platform_set_drvdata(pdev, pc);
-
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
@@ -210,20 +208,13 @@ static int lgm_pwm_probe(struct platform_device *pdev)
lgm_pwm_init(pc);
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(dev, &pc->chip);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to add PWM chip\n");
return 0;
}
-static int lgm_pwm_remove(struct platform_device *pdev)
-{
- struct lgm_pwm_chip *pc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pc->chip);
-}
-
static const struct of_device_id lgm_pwm_of_match[] = {
{ .compatible = "intel,lgm-pwm" },
{ }
@@ -236,7 +227,6 @@ static struct platform_driver lgm_pwm_driver = {
.of_match_table = lgm_pwm_of_match,
},
.probe = lgm_pwm_probe,
- .remove = lgm_pwm_remove,
};
module_platform_driver(lgm_pwm_driver);
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 6c6e26d18329..54bd95a5cab0 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -189,7 +189,6 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
if (!iqs620_pwm)
return -ENOMEM;
- platform_set_drvdata(pdev, iqs620_pwm);
iqs620_pwm->iqs62x = iqs62x;
ret = regmap_read(iqs62x->regmap, IQS620_PWR_SETTINGS, &val);
@@ -224,31 +223,18 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = pwmchip_add(&iqs620_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &iqs620_pwm->chip);
if (ret)
dev_err(&pdev->dev, "Failed to add device: %d\n", ret);
return ret;
}
-static int iqs620_pwm_remove(struct platform_device *pdev)
-{
- struct iqs620_pwm_private *iqs620_pwm = platform_get_drvdata(pdev);
- int ret;
-
- ret = pwmchip_remove(&iqs620_pwm->chip);
- if (ret)
- dev_err(&pdev->dev, "Failed to remove device: %d\n", ret);
-
- return ret;
-}
-
static struct platform_driver iqs620_pwm_platform_driver = {
.driver = {
.name = "iqs620a-pwm",
},
.probe = iqs620_pwm_probe,
- .remove = iqs620_pwm_remove,
};
module_platform_driver(iqs620_pwm_platform_driver);
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 990e7904c7f1..23dc1fb770e2 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -245,16 +245,7 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = info->num_pwms;
- platform_set_drvdata(pdev, jz4740);
-
- return pwmchip_add(&jz4740->chip);
-}
-
-static int jz4740_pwm_remove(struct platform_device *pdev)
-{
- struct jz4740_pwm_chip *jz4740 = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&jz4740->chip);
+ return devm_pwmchip_add(dev, &jz4740->chip);
}
static const struct soc_info __maybe_unused jz4740_soc_info = {
@@ -280,7 +271,6 @@ static struct platform_driver jz4740_pwm_driver = {
.of_match_table = of_match_ptr(jz4740_pwm_dt_ids),
},
.probe = jz4740_pwm_probe,
- .remove = jz4740_pwm_remove,
};
module_platform_driver(jz4740_pwm_driver);
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
index 521a825c8ba0..733811b05721 100644
--- a/drivers/pwm/pwm-keembay.c
+++ b/drivers/pwm/pwm-keembay.c
@@ -207,22 +207,13 @@ static int keembay_pwm_probe(struct platform_device *pdev)
priv->chip.ops = &keembay_pwm_ops;
priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
- ret = pwmchip_add(&priv->chip);
+ ret = devm_pwmchip_add(dev, &priv->chip);
if (ret)
return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
- platform_set_drvdata(pdev, priv);
-
return 0;
}
-static int keembay_pwm_remove(struct platform_device *pdev)
-{
- struct keembay_pwm *priv = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&priv->chip);
-}
-
static const struct of_device_id keembay_pwm_of_match[] = {
{ .compatible = "intel,keembay-pwm" },
{ }
@@ -231,7 +222,6 @@ MODULE_DEVICE_TABLE(of, keembay_pwm_of_match);
static struct platform_driver keembay_pwm_driver = {
.probe = keembay_pwm_probe,
- .remove = keembay_pwm_remove,
.driver = {
.name = "pwm-keembay",
.of_match_table = keembay_pwm_of_match,
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 7551253ada32..ea17d446a627 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -276,16 +276,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
- platform_set_drvdata(pdev, lp3943_pwm);
-
- return pwmchip_add(&lp3943_pwm->chip);
-}
-
-static int lp3943_pwm_remove(struct platform_device *pdev)
-{
- struct lp3943_pwm *lp3943_pwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&lp3943_pwm->chip);
+ return devm_pwmchip_add(&pdev->dev, &lp3943_pwm->chip);
}
#ifdef CONFIG_OF
@@ -298,7 +289,6 @@ MODULE_DEVICE_TABLE(of, lp3943_pwm_of_match);
static struct platform_driver lp3943_pwm_driver = {
.probe = lp3943_pwm_probe,
- .remove = lp3943_pwm_remove,
.driver = {
.name = "lp3943-pwm",
.of_match_table = of_match_ptr(lp3943_pwm_of_match),
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 2834a0f001d3..ddeab5687cb8 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -117,29 +117,20 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
lpc32xx->chip.npwm = 1;
- ret = pwmchip_add(&lpc32xx->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
- return ret;
- }
-
- /* When PWM is disable, configure the output to the default value */
+ /* If PWM is disabled, configure the output to the default value */
val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
val &= ~PWM_PIN_LEVEL;
writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
- platform_set_drvdata(pdev, lpc32xx);
+ ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
+ return ret;
+ }
return 0;
}
-static int lpc32xx_pwm_remove(struct platform_device *pdev)
-{
- struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&lpc32xx->chip);
-}
-
static const struct of_device_id lpc32xx_pwm_dt_ids[] = {
{ .compatible = "nxp,lpc3220-pwm", },
{ /* sentinel */ }
@@ -152,7 +143,6 @@ static struct platform_driver lpc32xx_pwm_driver = {
.of_match_table = lpc32xx_pwm_dt_ids,
},
.probe = lpc32xx_pwm_probe,
- .remove = lpc32xx_pwm_remove,
};
module_platform_driver(lpc32xx_pwm_driver);
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index b4a31060bcd7..0d4dd80e9f07 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -253,13 +253,11 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, pc);
-
pc->chip.dev = &pdev->dev;
pc->chip.ops = &pwm_mediatek_ops;
pc->chip.npwm = pc->soc->num_pwms;
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
@@ -268,13 +266,6 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
return 0;
}
-static int pwm_mediatek_remove(struct platform_device *pdev)
-{
- struct pwm_mediatek_chip *pc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pc->chip);
-}
-
static const struct pwm_mediatek_of_data mt2712_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
@@ -335,7 +326,6 @@ static struct platform_driver pwm_mediatek_driver = {
.of_match_table = pwm_mediatek_of_match,
},
.probe = pwm_mediatek_probe,
- .remove = pwm_mediatek_remove,
};
module_platform_driver(pwm_mediatek_driver);
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 9b3ba401a3db..c605013e4114 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -5,6 +5,7 @@
* Author: YH Huang <yh.huang@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -47,6 +48,7 @@ struct mtk_disp_pwm {
struct clk *clk_main;
struct clk *clk_mm;
void __iomem *base;
+ bool enabled;
};
static inline struct mtk_disp_pwm *to_mtk_disp_pwm(struct pwm_chip *chip)
@@ -66,14 +68,47 @@ static void mtk_disp_pwm_update_bits(struct mtk_disp_pwm *mdp, u32 offset,
writel(value, address);
}
-static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
u32 clk_div, period, high_width, value;
u64 div, rate;
int err;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+ 0x0);
+
+ if (mdp->enabled) {
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
+ }
+
+ mdp->enabled = false;
+ return 0;
+ }
+
+ if (!mdp->enabled) {
+ err = clk_prepare_enable(mdp->clk_main);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = clk_prepare_enable(mdp->clk_mm);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n",
+ ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
+ return err;
+ }
+ }
+
/*
* Find period, high_width and clk_div to suit duty_ns and period_ns.
* Calculate proper div value to keep period value in the bound.
@@ -85,29 +120,24 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* high_width = (PWM_CLK_RATE * duty_ns) / (10^9 * (clk_div + 1))
*/
rate = clk_get_rate(mdp->clk_main);
- clk_div = div_u64(rate * period_ns, NSEC_PER_SEC) >>
+ clk_div = mul_u64_u64_div_u64(state->period, rate, NSEC_PER_SEC) >>
PWM_PERIOD_BIT_WIDTH;
- if (clk_div > PWM_CLKDIV_MAX)
+ if (clk_div > PWM_CLKDIV_MAX) {
+ if (!mdp->enabled) {
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
+ }
return -EINVAL;
+ }
div = NSEC_PER_SEC * (clk_div + 1);
- period = div64_u64(rate * period_ns, div);
+ period = mul_u64_u64_div_u64(state->period, rate, div);
if (period > 0)
period--;
- high_width = div64_u64(rate * duty_ns, div);
+ high_width = mul_u64_u64_div_u64(state->duty_cycle, rate, div);
value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);
- err = clk_enable(mdp->clk_main);
- if (err < 0)
- return err;
-
- err = clk_enable(mdp->clk_mm);
- if (err < 0) {
- clk_disable(mdp->clk_main);
- return err;
- }
-
mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
PWM_CLKDIV_MASK,
clk_div << PWM_CLKDIV_SHIFT);
@@ -122,50 +152,70 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
mdp->data->commit_mask,
0x0);
+ } else {
+ /*
+ * For MT2701, disable double buffer before writing register
+ * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+ */
+ mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+ mdp->data->bls_debug_mask,
+ mdp->data->bls_debug_mask);
+ mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ mdp->data->con0_sel,
+ mdp->data->con0_sel);
}
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+ mdp->data->enable_mask);
+ mdp->enabled = true;
return 0;
}
-static int mtk_disp_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
+ u64 rate, period, high_width;
+ u32 clk_div, con0, con1;
int err;
- err = clk_enable(mdp->clk_main);
- if (err < 0)
- return err;
-
- err = clk_enable(mdp->clk_mm);
+ err = clk_prepare_enable(mdp->clk_main);
if (err < 0) {
- clk_disable(mdp->clk_main);
- return err;
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+ return;
}
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
- mdp->data->enable_mask);
-
- return 0;
-}
-
-static void mtk_disp_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
-
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
- 0x0);
+ err = clk_prepare_enable(mdp->clk_mm);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
+ return;
+ }
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ rate = clk_get_rate(mdp->clk_main);
+ con0 = readl(mdp->base + mdp->data->con0);
+ con1 = readl(mdp->base + mdp->data->con1);
+ state->enabled = !!(con0 & BIT(0));
+ clk_div = FIELD_GET(PWM_CLKDIV_MASK, con0);
+ period = FIELD_GET(PWM_PERIOD_MASK, con1);
+ /*
+ * period is 12 bits wide, clk_div 11 bits, and NSEC_PER_SEC fits in
+ * 30 bits, so period * (clk_div + 1) * NSEC_PER_SEC doesn't overflow
+ * 64 bits.
+ */
+ state->period = DIV64_U64_ROUND_UP(period * (clk_div + 1) * NSEC_PER_SEC, rate);
+ high_width = FIELD_GET(PWM_HIGH_WIDTH_MASK, con1);
+ state->duty_cycle = DIV64_U64_ROUND_UP(high_width * (clk_div + 1) * NSEC_PER_SEC,
+ rate);
+ state->polarity = PWM_POLARITY_NORMAL;
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
}
static const struct pwm_ops mtk_disp_pwm_ops = {
- .config = mtk_disp_pwm_config,
- .enable = mtk_disp_pwm_enable,
- .disable = mtk_disp_pwm_disable,
+ .apply = mtk_disp_pwm_apply,
+ .get_state = mtk_disp_pwm_get_state,
.owner = THIS_MODULE,
};
@@ -192,58 +242,28 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
if (IS_ERR(mdp->clk_mm))
return PTR_ERR(mdp->clk_mm);
- ret = clk_prepare(mdp->clk_main);
- if (ret < 0)
- return ret;
-
- ret = clk_prepare(mdp->clk_mm);
- if (ret < 0)
- goto disable_clk_main;
-
mdp->chip.dev = &pdev->dev;
mdp->chip.ops = &mtk_disp_pwm_ops;
mdp->chip.npwm = 1;
ret = pwmchip_add(&mdp->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
- goto disable_clk_mm;
+ dev_err(&pdev->dev, "pwmchip_add() failed: %pe\n", ERR_PTR(ret));
+ return ret;
}
platform_set_drvdata(pdev, mdp);
- /*
- * For MT2701, disable double buffer before writing register
- * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
- */
- if (!mdp->data->has_commit) {
- mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
- mdp->data->bls_debug_mask,
- mdp->data->bls_debug_mask);
- mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
- mdp->data->con0_sel,
- mdp->data->con0_sel);
- }
-
return 0;
-
-disable_clk_mm:
- clk_unprepare(mdp->clk_mm);
-disable_clk_main:
- clk_unprepare(mdp->clk_main);
- return ret;
}
static int mtk_disp_pwm_remove(struct platform_device *pdev)
{
struct mtk_disp_pwm *mdp = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&mdp->chip);
- clk_unprepare(mdp->clk_mm);
- clk_unprepare(mdp->clk_main);
+ pwmchip_remove(&mdp->chip);
- return ret;
+ return 0;
}
static const struct mtk_pwm_data mt2701_pwm_data = {
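Beyond the clock handling, the mtk-disp change is a legacy-to-atomic conversion: .config/.enable/.disable are replaced by .apply/.get_state, and the period math switches to mul_u64_u64_div_u64() because state->period is a u64 nanosecond value whose product with the clock rate can overflow a plain 64-bit multiply. A bare skeleton of the callback pair (hypothetical "foo" driver, hardware details elided in comments):

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	if (state->polarity != PWM_POLARITY_NORMAL)
		return -EINVAL;

	if (!state->enabled) {
		/* disable the output and release clocks */
		return 0;
	}

	/* program period and duty cycle, then enable the output */
	return 0;
}

static void foo_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
			      struct pwm_state *state)
{
	/* read back enabled, period and duty_cycle from the hardware */
	state->polarity = PWM_POLARITY_NORMAL;
}

static const struct pwm_ops foo_pwm_ops = {
	.apply = foo_pwm_apply,
	.get_state = foo_pwm_get_state,
	.owner = THIS_MODULE,
};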
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index a22180803bd7..766dbc58dad8 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -145,30 +145,18 @@ static int mxs_pwm_probe(struct platform_device *pdev)
return ret;
}
- ret = pwmchip_add(&mxs->chip);
+ /* FIXME: Only do this if the PWM isn't already running */
+ ret = stmp_reset_block(mxs->base);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to reset PWM\n");
+
+ ret = devm_pwmchip_add(&pdev->dev, &mxs->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, mxs);
-
- ret = stmp_reset_block(mxs->base);
- if (ret)
- goto pwm_remove;
-
return 0;
-
-pwm_remove:
- pwmchip_remove(&mxs->chip);
- return ret;
-}
-
-static int mxs_pwm_remove(struct platform_device *pdev)
-{
- struct mxs_pwm_chip *mxs = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&mxs->chip);
}
static const struct of_device_id mxs_pwm_dt_ids[] = {
@@ -183,7 +171,6 @@ static struct platform_driver mxs_pwm_driver = {
.of_match_table = mxs_pwm_dt_ids,
},
.probe = mxs_pwm_probe,
- .remove = mxs_pwm_remove,
};
module_platform_driver(mxs_pwm_driver);
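Note the reordering in the mxs hunk above: the block reset now runs before the chip is registered. Once devm_pwmchip_add() returns, a consumer may legally claim and configure a channel at any moment, so the hardware must already be in a known state; the devm conversion also makes the old error-path pwmchip_remove() unnecessary. The ordering rule, in short:

	/* 1. bring the hardware to a known state ... */
	ret = stmp_reset_block(mxs->base);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "failed to reset PWM\n");

	/* 2. ... and only then publish the chip to consumers */
	return devm_pwmchip_add(&pdev->dev, &mxs->chip);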
diff --git a/drivers/pwm/pwm-ntxec.c b/drivers/pwm/pwm-ntxec.c
index 50c454c553c4..ab63b081df53 100644
--- a/drivers/pwm/pwm-ntxec.c
+++ b/drivers/pwm/pwm-ntxec.c
@@ -150,23 +150,12 @@ static int ntxec_pwm_probe(struct platform_device *pdev)
priv->ec = ec;
priv->dev = &pdev->dev;
- platform_set_drvdata(pdev, priv);
-
chip = &priv->chip;
chip->dev = &pdev->dev;
chip->ops = &ntxec_pwm_ops;
- chip->base = -1;
chip->npwm = 1;
- return pwmchip_add(chip);
-}
-
-static int ntxec_pwm_remove(struct platform_device *pdev)
-{
- struct ntxec_pwm *priv = platform_get_drvdata(pdev);
- struct pwm_chip *chip = &priv->chip;
-
- return pwmchip_remove(chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static struct platform_driver ntxec_pwm_driver = {
@@ -174,7 +163,6 @@ static struct platform_driver ntxec_pwm_driver = {
.name = "ntxec-pwm",
},
.probe = ntxec_pwm_probe,
- .remove = ntxec_pwm_remove,
};
module_platform_driver(ntxec_pwm_driver);
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 507a2d945b90..fa800fcf31d4 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -444,11 +444,8 @@ err_find_timer_pdev:
static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
{
struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&omap->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&omap->chip);
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 42ed770b432c..c56001a790d0 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -601,11 +601,8 @@ static int pca9685_pwm_probe(struct i2c_client *client,
static int pca9685_pwm_remove(struct i2c_client *client)
{
struct pca9685 *pca = i2c_get_clientdata(client);
- int ret;
- ret = pwmchip_remove(&pca->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&pca->chip);
if (!pm_runtime_enabled(&client->dev)) {
/* Put chip in sleep state if runtime PM is disabled */
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index e091a528e33c..a9efdcf839ae 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -195,32 +195,21 @@ static int pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, pc);
return 0;
}
-static int pwm_remove(struct platform_device *pdev)
-{
- struct pxa_pwm_chip *pc;
-
- pc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pc->chip);
-}
-
static struct platform_driver pwm_driver = {
.driver = {
.name = "pxa25x-pwm",
.of_match_table = pwm_of_match,
},
.probe = pwm_probe,
- .remove = pwm_remove,
.id_table = pwm_id_table,
};
diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
index 043fc32e8be8..579a15240e0a 100644
--- a/drivers/pwm/pwm-raspberrypi-poe.c
+++ b/drivers/pwm/pwm-raspberrypi-poe.c
@@ -166,8 +166,6 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
rpipwm->chip.base = -1;
rpipwm->chip.npwm = RASPBERRYPI_FIRMWARE_PWM_NUM;
- platform_set_drvdata(pdev, rpipwm);
-
ret = raspberrypi_pwm_get_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
&rpipwm->duty_cycle);
if (ret) {
@@ -175,14 +173,7 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
return ret;
}
- return pwmchip_add(&rpipwm->chip);
-}
-
-static int raspberrypi_pwm_remove(struct platform_device *pdev)
-{
- struct raspberrypi_pwm *rpipwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&rpipwm->chip);
+ return devm_pwmchip_add(dev, &rpipwm->chip);
}
static const struct of_device_id raspberrypi_pwm_of_match[] = {
@@ -197,7 +188,6 @@ static struct platform_driver raspberrypi_pwm_driver = {
.of_match_table = raspberrypi_pwm_of_match,
},
.probe = raspberrypi_pwm_probe,
- .remove = raspberrypi_pwm_remove,
};
module_platform_driver(raspberrypi_pwm_driver);
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 9daca0c772c7..b437192380e2 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -241,13 +241,12 @@ static int rcar_pwm_probe(struct platform_device *pdev)
static int rcar_pwm_remove(struct platform_device *pdev)
{
struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&rcar_pwm->chip);
+ pwmchip_remove(&rcar_pwm->chip);
pm_runtime_disable(&pdev->dev);
- return ret;
+ return 0;
}
static const struct of_device_id rcar_pwm_of_table[] = {
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index b853e7942605..4381df90a527 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -425,13 +425,12 @@ static int tpu_probe(struct platform_device *pdev)
static int tpu_remove(struct platform_device *pdev)
{
struct tpu_device *tpu = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&tpu->chip);
+ pwmchip_remove(&tpu->chip);
pm_runtime_disable(&pdev->dev);
- return ret;
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index cbe900877724..f3647b317152 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -384,24 +384,12 @@ static int rockchip_pwm_remove(struct platform_device *pdev)
{
struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
- /*
- * Disable the PWM clk before unpreparing it if the PWM device is still
- * running. This should only happen when the last PWM user left it
- * enabled, or when nobody requested a PWM that was previously enabled
- * by the bootloader.
- *
- * FIXME: Maybe the core should disable all PWM devices in
- * pwmchip_remove(). In this case we'd only have to call
- * clk_unprepare() after pwmchip_remove().
- *
- */
- if (pwm_is_enabled(pc->chip.pwms))
- clk_disable(pc->clk);
+ pwmchip_remove(&pc->chip);
clk_unprepare(pc->pclk);
clk_unprepare(pc->clk);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
static struct platform_driver rockchip_pwm_driver = {
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index f6c528f02d43..dd94c4312a0c 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -580,11 +580,8 @@ static int pwm_samsung_probe(struct platform_device *pdev)
static int pwm_samsung_remove(struct platform_device *pdev)
{
struct samsung_pwm_chip *chip = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&chip->chip);
- if (ret < 0)
- return ret;
+ pwmchip_remove(&chip->chip);
clk_disable_unprepare(chip->base_clk);
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 420edc4aa94a..253c4a17d255 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -291,7 +291,7 @@ static int pwm_sifive_remove(struct platform_device *dev)
struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
bool is_enabled = false;
struct pwm_device *pwm;
- int ret, ch;
+ int ch;
for (ch = 0; ch < ddata->chip.npwm; ch++) {
pwm = &ddata->chip.pwms[ch];
@@ -304,10 +304,10 @@ static int pwm_sifive_remove(struct platform_device *dev)
clk_disable(ddata->clk);
clk_disable_unprepare(ddata->clk);
- ret = pwmchip_remove(&ddata->chip);
+ pwmchip_remove(&ddata->chip);
clk_notifier_unregister(ddata->clk, &ddata->notifier);
- return ret;
+ return 0;
}
static const struct of_device_id pwm_sifive_of_match[] = {
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index 7a69c1a0c060..589aeaaa6ac8 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -231,9 +231,7 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
chip->ops = &sl28cpld_pwm_ops;
chip->npwm = 1;
- platform_set_drvdata(pdev, priv);
-
- ret = pwmchip_add(&priv->pwm_chip);
+ ret = devm_pwmchip_add(&pdev->dev, &priv->pwm_chip);
if (ret) {
dev_err(&pdev->dev, "failed to add PWM chip (%pe)",
ERR_PTR(ret));
@@ -243,13 +241,6 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
return 0;
}
-static int sl28cpld_pwm_remove(struct platform_device *pdev)
-{
- struct sl28cpld_pwm *priv = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&priv->pwm_chip);
-}
-
static const struct of_device_id sl28cpld_pwm_of_match[] = {
{ .compatible = "kontron,sl28cpld-pwm" },
{}
@@ -258,7 +249,6 @@ MODULE_DEVICE_TABLE(of, sl28cpld_pwm_of_match);
static struct platform_driver sl28cpld_pwm_driver = {
.probe = sl28cpld_pwm_probe,
- .remove = sl28cpld_pwm_remove,
.driver = {
.name = "sl28cpld-pwm",
.of_match_table = sl28cpld_pwm_of_match,
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 93dd03618465..3115abb3f52a 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -209,7 +209,7 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
priv->chip.ops = &stm32_pwm_lp_ops;
priv->chip.npwm = 1;
- ret = pwmchip_add(&priv->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
if (ret < 0)
return ret;
@@ -218,15 +218,6 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
return 0;
}
-static int stm32_pwm_lp_remove(struct platform_device *pdev)
-{
- struct stm32_pwm_lp *priv = platform_get_drvdata(pdev);
-
- pwm_disable(&priv->chip.pwms[0]);
-
- return pwmchip_remove(&priv->chip);
-}
-
static int __maybe_unused stm32_pwm_lp_suspend(struct device *dev)
{
struct stm32_pwm_lp *priv = dev_get_drvdata(dev);
@@ -258,7 +249,6 @@ MODULE_DEVICE_TABLE(of, stm32_pwm_lp_of_match);
static struct platform_driver stm32_pwm_lp_driver = {
.probe = stm32_pwm_lp_probe,
- .remove = stm32_pwm_lp_remove,
.driver = {
.name = "stm32-pwm-lp",
.of_match_table = of_match_ptr(stm32_pwm_lp_of_match),
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index c952604e91f3..91ca67651abd 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -484,11 +484,8 @@ err_bus:
static int sun4i_pwm_remove(struct platform_device *pdev)
{
struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&pwm->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&pwm->chip);
clk_disable_unprepare(pwm->bus_clk);
reset_control_assert(pwm->rst);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 35eb19a5a0d1..4701f0c9b921 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -253,7 +253,7 @@ static int ecap_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
@@ -267,11 +267,9 @@ static int ecap_pwm_probe(struct platform_device *pdev)
static int ecap_pwm_remove(struct platform_device *pdev)
{
- struct ecap_pwm_chip *pc = platform_get_drvdata(pdev);
-
pm_runtime_disable(&pdev->dev);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 17909fa53211..5b723a48c5f1 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -485,11 +485,13 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
{
struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev);
+ pwmchip_remove(&pc->chip);
+
clk_unprepare(pc->tbclk);
pm_runtime_disable(&pdev->dev);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 6c8df5f4e87d..49d9f7a78012 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -276,7 +276,6 @@ static const struct pwm_ops twl6030_pwmled_ops = {
static int twl_pwmled_probe(struct platform_device *pdev)
{
struct twl_pwmled_chip *twl;
- int ret;
twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
@@ -294,20 +293,7 @@ static int twl_pwmled_probe(struct platform_device *pdev)
mutex_init(&twl->mutex);
- ret = pwmchip_add(&twl->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, twl);
-
- return 0;
-}
-
-static int twl_pwmled_remove(struct platform_device *pdev)
-{
- struct twl_pwmled_chip *twl = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&twl->chip);
+ return devm_pwmchip_add(&pdev->dev, &twl->chip);
}
#ifdef CONFIG_OF
@@ -325,7 +311,6 @@ static struct platform_driver twl_pwmled_driver = {
.of_match_table = of_match_ptr(twl_pwmled_of_match),
},
.probe = twl_pwmled_probe,
- .remove = twl_pwmled_remove,
};
module_platform_driver(twl_pwmled_driver);
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index e83a826bf621..203194f2c92e 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -298,7 +298,6 @@ static const struct pwm_ops twl6030_pwm_ops = {
static int twl_pwm_probe(struct platform_device *pdev)
{
struct twl_pwm_chip *twl;
- int ret;
twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
@@ -314,20 +313,7 @@ static int twl_pwm_probe(struct platform_device *pdev)
mutex_init(&twl->mutex);
- ret = pwmchip_add(&twl->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, twl);
-
- return 0;
-}
-
-static int twl_pwm_remove(struct platform_device *pdev)
-{
- struct twl_pwm_chip *twl = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&twl->chip);
+ return devm_pwmchip_add(&pdev->dev, &twl->chip);
}
#ifdef CONFIG_OF
@@ -345,7 +331,6 @@ static struct platform_driver twl_pwm_driver = {
.of_match_table = of_match_ptr(twl_pwm_of_match),
},
.probe = twl_pwm_probe,
- .remove = twl_pwm_remove,
};
module_platform_driver(twl_pwm_driver);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 12153d5801ce..e1bc5214494e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -75,6 +75,15 @@ config RTC_DEBUG
Say yes here to enable debugging support in the RTC framework
and individual RTC drivers.
+config RTC_LIB_KUNIT_TEST
+ tristate "KUnit test for RTC lib functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test RTC library functions.
+
+ If unsure, say N.
+
config RTC_NVMEM
bool "RTC non volatile storage support"
select NVMEM
@@ -624,6 +633,7 @@ config RTC_DRV_FM3130
config RTC_DRV_RX8010
tristate "Epson RX8010SJ"
+ select REGMAP_I2C
help
If you say yes here you get support for the Epson RX8010SJ RTC
chip.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 2dd0dd956b0e..5ceeafe4d5b2 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -15,6 +15,8 @@ rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o
rtc-core-$(CONFIG_RTC_INTF_SYSFS) += sysfs.o
+obj-$(CONFIG_RTC_LIB_KUNIT_TEST) += lib_test.o
+
# Keep the list ordered.
obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c
index 23284580df97..fe361652727a 100644
--- a/drivers/rtc/lib.c
+++ b/drivers/rtc/lib.c
@@ -6,6 +6,8 @@
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* based on arch/arm/common/rtctime.c and other bits
+ *
+ * Author: Cassio Neri <cassio.neri@gmail.com> (rtc_time64_to_tm)
*/
#include <linux/export.h>
@@ -22,8 +24,6 @@ static const unsigned short rtc_ydays[2][13] = {
{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
-#define LEAPS_THRU_END_OF(y) ((y) / 4 - (y) / 100 + (y) / 400)
-
/*
* The number of days in the month.
*/
@@ -42,42 +42,95 @@ int rtc_year_days(unsigned int day, unsigned int month, unsigned int year)
}
EXPORT_SYMBOL(rtc_year_days);
-/*
- * rtc_time64_to_tm - Converts time64_t to rtc_time.
- * Convert seconds since 01-01-1970 00:00:00 to Gregorian date.
+/**
+ * rtc_time64_to_tm - converts time64_t to rtc_time.
+ *
+ * @time: The number of seconds since 01-01-1970 00:00:00.
+ * (Must be positive.)
+ * @tm: Pointer to the struct rtc_time.
*/
void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
{
- unsigned int month, year, secs;
+ unsigned int secs;
int days;
+ u64 u64tmp;
+ u32 u32tmp, udays, century, day_of_century, year_of_century, year,
+ day_of_year, month, day;
+ bool is_Jan_or_Feb, is_leap_year;
+
/* time must be positive */
days = div_s64_rem(time, 86400, &secs);
/* day of the week, 1970-01-01 was a Thursday */
tm->tm_wday = (days + 4) % 7;
- year = 1970 + days / 365;
- days -= (year - 1970) * 365
- + LEAPS_THRU_END_OF(year - 1)
- - LEAPS_THRU_END_OF(1970 - 1);
- while (days < 0) {
- year -= 1;
- days += 365 + is_leap_year(year);
- }
- tm->tm_year = year - 1900;
- tm->tm_yday = days + 1;
-
- for (month = 0; month < 11; month++) {
- int newdays;
-
- newdays = days - rtc_month_days(month, year);
- if (newdays < 0)
- break;
- days = newdays;
- }
- tm->tm_mon = month;
- tm->tm_mday = days + 1;
+ /*
+ * The following algorithm is, basically, Proposition 6.3 of Neri
+ * and Schneider [1]. In a few words: it works on the computational
+ * (fictitious) calendar where the year starts in March, month = 2
+ * (*), and finishes in February, month = 13. This calendar is
+ * mathematically convenient because the day of the year does not
+ * depend on whether the year is leap or not. For instance:
+ *
+ * March 1st 0-th day of the year;
+ * ...
+ * April 1st 31-st day of the year;
+ * ...
+ * January 1st 306-th day of the year; (Important!)
+ * ...
+ * February 28th 364-th day of the year;
+ * February 29th 365-th day of the year (if it exists).
+ *
+ * After having worked out the date in the computational calendar
+ * (using just arithmetics) it's easy to convert it to the
+ * corresponding date in the Gregorian calendar.
+ *
+ * [1] "Euclidean Affine Functions and Applications to Calendar
+ * Algorithms". https://arxiv.org/abs/2102.06959
+ *
+ * (*) The numbering of months follows rtc_time more closely and
+ * thus, is slightly different from [1].
+ */
+
+ udays = ((u32) days) + 719468;
+
+ u32tmp = 4 * udays + 3;
+ century = u32tmp / 146097;
+ day_of_century = u32tmp % 146097 / 4;
+
+ u32tmp = 4 * day_of_century + 3;
+ u64tmp = 2939745ULL * u32tmp;
+ year_of_century = upper_32_bits(u64tmp);
+ day_of_year = lower_32_bits(u64tmp) / 2939745 / 4;
+
+ year = 100 * century + year_of_century;
+ is_leap_year = year_of_century != 0 ?
+ year_of_century % 4 == 0 : century % 4 == 0;
+
+ u32tmp = 2141 * day_of_year + 132377;
+ month = u32tmp >> 16;
+ day = ((u16) u32tmp) / 2141;
+
+ /*
+ * Recall that January 01 is the 306-th day of the year in the
+ * computational (not Gregorian) calendar.
+ */
+ is_Jan_or_Feb = day_of_year >= 306;
+
+ /* Converts to the Gregorian calendar. */
+ year = year + is_Jan_or_Feb;
+ month = is_Jan_or_Feb ? month - 12 : month;
+ day = day + 1;
+
+ day_of_year = is_Jan_or_Feb ?
+ day_of_year - 306 : day_of_year + 31 + 28 + is_leap_year;
+
+ /* Converts to rtc_time's format. */
+ tm->tm_year = (int) (year - 1900);
+ tm->tm_mon = (int) month;
+ tm->tm_mday = (int) day;
+ tm->tm_yday = (int) day_of_year + 1;
tm->tm_hour = secs / 3600;
secs -= tm->tm_hour * 3600;
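As a worked example (not part of the patch), feeding time = 0, i.e. 1970-01-01, through the new arithmetic:

	udays            = 0 + 719468                    = 719468
	4 * udays + 3    = 2877875
	century          = 2877875 / 146097              = 19
	day_of_century   = 2877875 % 146097 / 4          = 25508
	4 * 25508 + 3    = 102035
	2939745 * 102035 = 299956881075
	year_of_century  = upper 32 bits                 = 69
	day_of_year      = lower 32 bits / 2939745 / 4   = 306

year = 100 * 19 + 69 = 1969 with day_of_year = 306, so the date falls in the Jan/Feb tail of the computational year: month = (2141 * 306 + 132377) >> 16 = 12, day = 0, and after the Gregorian conversion year = 1970, month = 0 (January), mday = 1, as expected.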
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
new file mode 100644
index 000000000000..d5caf36c56cd
--- /dev/null
+++ b/drivers/rtc/lib_test.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: LGPL-2.1+
+
+#include <kunit/test.h>
+#include <linux/rtc.h>
+
+/*
+ * Advance a date by one day.
+ */
+static void advance_date(int *year, int *month, int *mday, int *yday)
+{
+ if (*mday != rtc_month_days(*month - 1, *year)) {
+ ++*mday;
+ ++*yday;
+ return;
+ }
+
+ *mday = 1;
+ if (*month != 12) {
+ ++*month;
+ ++*yday;
+ return;
+ }
+
+ *month = 1;
+ *yday = 1;
+ ++*year;
+}
+
+/*
+ * Checks every day in a 160000-year interval starting on 1970-01-01
+ * against the expected result.
+ */
+static void rtc_time64_to_tm_test_date_range(struct kunit *test)
+{
+ /*
+ * 160000 years = (160000 / 400) * 400 years
+ * = (160000 / 400) * 146097 days
+ * = (160000 / 400) * 146097 * 86400 seconds
+ */
+ time64_t total_secs = ((time64_t) 160000) / 400 * 146097 * 86400;
+
+ int year = 1970;
+ int month = 1;
+ int mday = 1;
+ int yday = 1;
+
+ struct rtc_time result;
+ time64_t secs;
+ s64 days;
+
+ for (secs = 0; secs <= total_secs; secs += 86400) {
+
+ rtc_time64_to_tm(secs, &result);
+
+ days = div_s64(secs, 86400);
+
+ #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \
+ year, month, mday, yday, days
+
+ KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, month - 1, result.tm_mon, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, mday, result.tm_mday, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, yday, result.tm_yday, FAIL_MSG);
+
+ advance_date(&year, &month, &mday, &yday);
+ }
+}
+
+static struct kunit_case rtc_lib_test_cases[] = {
+ KUNIT_CASE(rtc_time64_to_tm_test_date_range),
+ {}
+};
+
+static struct kunit_suite rtc_lib_test_suite = {
+ .name = "rtc_lib_test_cases",
+ .test_cases = rtc_lib_test_cases,
+};
+
+kunit_test_suite(rtc_lib_test_suite);
+
+MODULE_LICENSE("GPL");
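The leap-day handling is the subtle part of advance_date(). A minimal sketch
of its behaviour across 2000-02-28 (assuming advance_date() and the existing
rtc_month_days() helper are in scope; not code from the patch):

	int year = 2000, month = 2, mday = 28, yday = 31 + 28;	/* Feb 28 */

	advance_date(&year, &month, &mday, &yday);
	/* rtc_month_days(1, 2000) == 29, so February gains a day */
	assert(month == 2 && mday == 29 && yday == 60);

	advance_date(&year, &month, &mday, &yday);
	assert(month == 3 && mday == 1 && yday == 61);		/* Mar 1 */

The suite itself runs under KUnit in the usual way (for instance via
tools/testing/kunit/kunit.py) once the matching Kconfig symbol, which is not
visible in this hunk, is enabled.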
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 670fd8a2970e..4eb53412b808 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -229,19 +229,13 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
if (!pm_trace_rtc_valid())
return -EIO;
- /* REVISIT: if the clock has a "century" register, use
- * that instead of the heuristic in mc146818_get_time().
- * That'll make Y3K compatility (year > 2070) easy!
- */
mc146818_get_time(t);
return 0;
}
static int cmos_set_time(struct device *dev, struct rtc_time *t)
{
- /* REVISIT: set the "century" register if available
- *
- * NOTE: this ignores the issue whereby updating the seconds
+ /* NOTE: this ignores the issue whereby updating the seconds
* takes effect exactly 500ms after we write the register.
* (Also queueing and other delays before we get this far.)
*/
@@ -1053,7 +1047,9 @@ static void cmos_check_wkalrm(struct device *dev)
* ACK the rtc irq here
*/
if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
+ local_irq_disable();
cmos_interrupt(0, (void *)cmos->rtc);
+ local_irq_enable();
return;
}
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index c914091819ba..d38aaf08108c 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -60,14 +60,23 @@
#define RX8025_ADJ_DATA_MAX 62
#define RX8025_ADJ_DATA_MIN -62
+enum rx_model {
+ model_rx_unknown,
+ model_rx_8025,
+ model_rx_8035,
+ model_last
+};
+
static const struct i2c_device_id rx8025_id[] = {
- { "rx8025", 0 },
+ { "rx8025", model_rx_8025 },
+ { "rx8035", model_rx_8035 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8025_id);
struct rx8025_data {
struct rtc_device *rtc;
+ enum rx_model model;
u8 ctrl1;
};
@@ -100,10 +109,26 @@ static s32 rx8025_write_regs(const struct i2c_client *client,
length, values);
}
+static int rx8025_is_osc_stopped(enum rx_model model, int ctrl2)
+{
+ int xstp = ctrl2 & RX8025_BIT_CTRL2_XST;
+ /* XSTP bit has different polarity on RX-8025 vs RX-8035.
+ * RX-8025: 0 == oscillator stopped
+ * RX-8035: 1 == oscillator stopped
+ */
+
+ if (model == model_rx_8025)
+ xstp = !xstp;
+
+ return xstp;
+}
+
static int rx8025_check_validity(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ struct rx8025_data *drvdata = dev_get_drvdata(dev);
int ctrl2;
+ int xstp;
ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
@@ -117,7 +142,8 @@ static int rx8025_check_validity(struct device *dev)
return -EINVAL;
}
- if (!(ctrl2 & RX8025_BIT_CTRL2_XST)) {
+ xstp = rx8025_is_osc_stopped(drvdata->model, ctrl2);
+ if (xstp) {
dev_warn(dev, "crystal stopped, date is invalid\n");
return -EINVAL;
}
@@ -127,6 +153,7 @@ static int rx8025_check_validity(struct device *dev)
static int rx8025_reset_validity(struct i2c_client *client)
{
+ struct rx8025_data *drvdata = i2c_get_clientdata(client);
int ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
@@ -134,22 +161,28 @@ static int rx8025_reset_validity(struct i2c_client *client)
ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET);
+ if (drvdata->model == model_rx_8025)
+ ctrl2 |= RX8025_BIT_CTRL2_XST;
+ else
+ ctrl2 &= ~(RX8025_BIT_CTRL2_XST);
+
return rx8025_write_reg(client, RX8025_REG_CTRL2,
- ctrl2 | RX8025_BIT_CTRL2_XST);
+ ctrl2);
}
static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
- int status;
+ int status, xstp;
rtc_lock(rx8025->rtc);
status = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (status < 0)
goto out;
- if (!(status & RX8025_BIT_CTRL2_XST))
+ xstp = rx8025_is_osc_stopped(rx8025->model, status);
+ if (xstp)
dev_warn(&client->dev, "Oscillation stop was detected, "
"you may have to readjust the clock\n");
@@ -519,6 +552,9 @@ static int rx8025_probe(struct i2c_client *client,
i2c_set_clientdata(client, rx8025);
+ if (id)
+ rx8025->model = id->driver_data;
+
err = rx8025_init_client(client);
if (err)
return err;
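Because the XSTP polarity is inverted between the two chips, a compact truth
check of rx8025_is_osc_stopped() is a handy reference (a sketch reusing the
enum and register bit from the patch, not driver code):

	/* RX-8025: XST == 1 means the crystal is running */
	assert(rx8025_is_osc_stopped(model_rx_8025, RX8025_BIT_CTRL2_XST) == 0);
	assert(rx8025_is_osc_stopped(model_rx_8025, 0) != 0);

	/* RX-8035: XST == 1 means the oscillator has stopped */
	assert(rx8025_is_osc_stopped(model_rx_8035, RX8025_BIT_CTRL2_XST) != 0);
	assert(rx8025_is_osc_stopped(model_rx_8035, 0) == 0);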
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 6b56f8eacba6..fb9c6b709e13 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -204,15 +204,9 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_WEEKDAY] = 1 << tm->tm_wday;
data[RTC_DATE] = tm->tm_mday;
data[RTC_MONTH] = tm->tm_mon + 1;
- data[RTC_YEAR1] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
+ data[RTC_YEAR1] = tm->tm_year - 100;
- if (tm->tm_year < 100) {
- pr_err("RTC cannot handle the year %d\n",
- 1900 + tm->tm_year);
- return -EINVAL;
- } else {
- return 0;
- }
+ return 0;
}
/*
@@ -786,29 +780,35 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
- device_init_wakeup(&pdev->dev, 1);
-
- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "s5m-rtc",
- &s5m_rtc_ops, THIS_MODULE);
-
+ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc_dev))
return PTR_ERR(info->rtc_dev);
- if (!info->irq) {
- dev_info(&pdev->dev, "Alarm IRQ not available\n");
- return 0;
+ info->rtc_dev->ops = &s5m_rtc_ops;
+
+ if (info->device_type == S5M8763X) {
+ info->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ info->rtc_dev->range_max = RTC_TIMESTAMP_END_9999;
+ } else {
+ info->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ info->rtc_dev->range_max = RTC_TIMESTAMP_END_2099;
}
- ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
- s5m_rtc_alarm_irq, 0, "rtc-alarm0",
- info);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
- info->irq, ret);
- return ret;
+ if (!info->irq) {
+ clear_bit(RTC_FEATURE_ALARM, info->rtc_dev->features);
+ } else {
+ ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+ s5m_rtc_alarm_irq, 0, "rtc-alarm0",
+ info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ info->irq, ret);
+ return ret;
+ }
+ device_init_wakeup(&pdev->dev, 1);
}
- return 0;
+ return devm_rtc_register_device(info->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
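The year-range check dropped from s5m8767_tm_to_data() is not lost: with
range_min/range_max filled in before devm_rtc_register_device(), the RTC core
rejects out-of-range times before any driver callback runs. Roughly, the
core-side check behaves like this (a simplified paraphrase of
rtc_valid_range() in drivers/rtc/interface.c, not the literal source):

	if (rtc->range_min != rtc->range_max) {
		time64_t time = rtc_tm_to_time64(tm);

		if (time < rtc->range_min || time > rtc->range_max)
			return -ERANGE;
	}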
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index bc89c62ccb9b..75e4c2d777b9 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -467,6 +467,6 @@ static struct platform_driver tps65910_rtc_driver = {
};
module_platform_driver(tps65910_rtc_driver);
-MODULE_ALIAS("platform:rtc-tps65910");
+MODULE_ALIAS("platform:tps65910-rtc");
MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 376f1efbbb86..d0416dbd0cd8 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -2,17 +2,6 @@
comment "S/390 block device drivers"
depends on S390 && BLOCK
-config BLK_DEV_XPRAM
- def_tristate m
- prompt "XPRAM disk support"
- depends on S390 && BLOCK
- help
- Select this option if you want to use your expanded storage on S/390
- or zSeries as a disk. This is useful as a _fast_ swap device if you
- want to access more than 2G of memory when running in 31 bit mode.
- This option is also available as a module which will be called
- xpram. If unsure, say "N".
-
config DCSSBLK
def_tristate m
select FS_DAX_LIMITED
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 60c85cff556f..a0a54d2f063f 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_DASD) += dasd_mod.o
obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
-obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
scm_block-objs := scm_drv.o scm_blk.o
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
deleted file mode 100644
index ce98fab4d43c..000000000000
--- a/drivers/s390/block/xpram.c
+++ /dev/null
@@ -1,416 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Xpram.c -- the S/390 expanded memory RAM-disk
- *
- * significant parts of this code are based on
- * the sbull device driver presented in
- * A. Rubini: Linux Device Drivers
- *
- * Author of XPRAM specific coding: Reinhard Buendgen
- * buendgen@de.ibm.com
- * Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * External interfaces:
- * Interfaces to linux kernel
- * xpram_setup: read kernel parameters
- * Device specific file operations
- * xpram_iotcl
- * xpram_open
- *
- * "ad-hoc" partitioning:
- * the expanded memory can be partitioned among several devices
- * (with different minors). The partitioning set up can be
- * set by kernel or module parameters (int devs & int sizes[])
- *
- * Potential future improvements:
- * generic hard disk support to replace ad-hoc partitioning
- */
-
-#define KMSG_COMPONENT "xpram"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/ctype.h> /* isdigit, isxdigit */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/blkpg.h>
-#include <linux/hdreg.h> /* HDIO_GETGEO */
-#include <linux/device.h>
-#include <linux/bio.h>
-#include <linux/gfp.h>
-#include <linux/uaccess.h>
-
-#define XPRAM_NAME "xpram"
-#define XPRAM_DEVS 1 /* one partition */
-#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
-
-typedef struct {
- unsigned int size; /* size of xpram segment in pages */
- unsigned int offset; /* start page of xpram segment */
-} xpram_device_t;
-
-static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
-static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
-static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
-static unsigned int xpram_pages;
-static int xpram_devs;
-
-/*
- * Parameter parsing functions.
- */
-static int devs = XPRAM_DEVS;
-static char *sizes[XPRAM_MAX_DEVS];
-
-module_param(devs, int, 0);
-module_param_array(sizes, charp, NULL, 0);
-
-MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
- "the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
-MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
- "the defaults are 0s \n" \
- "All devices with size 0 equally partition the "
- "remaining space on the expanded strorage not "
- "claimed by explicit sizes\n");
-MODULE_LICENSE("GPL");
-
-/*
- * Copy expanded memory page (4kB) into main memory
- * Arguments
- * page_addr: address of target page
- * xpage_index: index of expandeded memory page
- * Return value
- * 0: if operation succeeds
- * -EIO: if pgin failed
- * -ENXIO: if xpram has vanished
- */
-static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
-{
- int cc = 2; /* return unused cc 2 if pgin traps */
-
- asm volatile(
- " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
- if (cc == 3)
- return -ENXIO;
- if (cc == 2)
- return -ENXIO;
- if (cc == 1)
- return -EIO;
- return 0;
-}
-
-/*
- * Copy a 4kB page of main memory to an expanded memory page
- * Arguments
- * page_addr: address of source page
- * xpage_index: index of expandeded memory page
- * Return value
- * 0: if operation succeeds
- * -EIO: if pgout failed
- * -ENXIO: if xpram has vanished
- */
-static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
-{
- int cc = 2; /* return unused cc 2 if pgin traps */
-
- asm volatile(
- " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
- if (cc == 3)
- return -ENXIO;
- if (cc == 2)
- return -ENXIO;
- if (cc == 1)
- return -EIO;
- return 0;
-}
-
-/*
- * Check if xpram is available.
- */
-static int __init xpram_present(void)
-{
- unsigned long mem_page;
- int rc;
-
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return -ENOMEM;
- rc = xpram_page_in(mem_page, 0);
- free_page(mem_page);
- return rc ? -ENXIO : 0;
-}
-
-/*
- * Return index of the last available xpram page.
- */
-static unsigned long __init xpram_highest_page_index(void)
-{
- unsigned int page_index, add_bit;
- unsigned long mem_page;
-
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return 0;
-
- page_index = 0;
- add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
- while (add_bit > 0) {
- if (xpram_page_in(mem_page, page_index | add_bit) == 0)
- page_index |= add_bit;
- add_bit >>= 1;
- }
-
- free_page (mem_page);
-
- return page_index;
-}
-
-/*
- * Block device make request function.
- */
-static blk_qc_t xpram_submit_bio(struct bio *bio)
-{
- xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
- struct bio_vec bvec;
- struct bvec_iter iter;
- unsigned int index;
- unsigned long page_addr;
- unsigned long bytes;
-
- blk_queue_split(&bio);
-
- if ((bio->bi_iter.bi_sector & 7) != 0 ||
- (bio->bi_iter.bi_size & 4095) != 0)
- /* Request is not page-aligned. */
- goto fail;
- if ((bio->bi_iter.bi_size >> 12) > xdev->size)
- /* Request size is no page-aligned. */
- goto fail;
- if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
- goto fail;
- index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
- bio_for_each_segment(bvec, bio, iter) {
- page_addr = (unsigned long)
- kmap(bvec.bv_page) + bvec.bv_offset;
- bytes = bvec.bv_len;
- if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
- /* More paranoia. */
- goto fail;
- while (bytes > 0) {
- if (bio_data_dir(bio) == READ) {
- if (xpram_page_in(page_addr, index) != 0)
- goto fail;
- } else {
- if (xpram_page_out(page_addr, index) != 0)
- goto fail;
- }
- page_addr += 4096;
- bytes -= 4096;
- index++;
- }
- }
- bio_endio(bio);
- return BLK_QC_T_NONE;
-fail:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
-}
-
-static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- unsigned long size;
-
- /*
- * get geometry: we have to fake one... trim the size to a
- * multiple of 64 (32k): tell we have 16 sectors, 4 heads,
- * whatever cylinders. Tell also that data starts at sector. 4.
- */
- size = (xpram_pages * 8) & ~0x3f;
- geo->cylinders = size >> 6;
- geo->heads = 4;
- geo->sectors = 16;
- geo->start = 4;
- return 0;
-}
-
-static const struct block_device_operations xpram_devops =
-{
- .owner = THIS_MODULE,
- .submit_bio = xpram_submit_bio,
- .getgeo = xpram_getgeo,
-};
-
-/*
- * Setup xpram_sizes array.
- */
-static int __init xpram_setup_sizes(unsigned long pages)
-{
- unsigned long mem_needed;
- unsigned long mem_auto;
- unsigned long long size;
- char *sizes_end;
- int mem_auto_no;
- int i;
-
- /* Check number of devices. */
- if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
- pr_err("%d is not a valid number of XPRAM devices\n",devs);
- return -EINVAL;
- }
- xpram_devs = devs;
-
- /*
- * Copy sizes array to xpram_sizes and align partition
- * sizes to page boundary.
- */
- mem_needed = 0;
- mem_auto_no = 0;
- for (i = 0; i < xpram_devs; i++) {
- if (sizes[i]) {
- size = simple_strtoull(sizes[i], &sizes_end, 0);
- switch (*sizes_end) {
- case 'g':
- case 'G':
- size <<= 20;
- break;
- case 'm':
- case 'M':
- size <<= 10;
- }
- xpram_sizes[i] = (size + 3) & -4UL;
- }
- if (xpram_sizes[i])
- mem_needed += xpram_sizes[i];
- else
- mem_auto_no++;
- }
-
- pr_info(" number of devices (partitions): %d \n", xpram_devs);
- for (i = 0; i < xpram_devs; i++) {
- if (xpram_sizes[i])
- pr_info(" size of partition %d: %u kB\n",
- i, xpram_sizes[i]);
- else
- pr_info(" size of partition %d to be set "
- "automatically\n",i);
- }
- pr_info(" memory needed (for sized partitions): %lu kB\n",
- mem_needed);
- pr_info(" partitions to be sized automatically: %d\n",
- mem_auto_no);
-
- if (mem_needed > pages * 4) {
- pr_err("Not enough expanded memory available\n");
- return -EINVAL;
- }
-
- /*
- * partitioning:
- * xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
- * else: ; all partitions with zero xpram_sizes[i]
- * partition equally the remaining space
- */
- if (mem_auto_no) {
- mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
- pr_info(" automatically determined "
- "partition size: %lu kB\n", mem_auto);
- for (i = 0; i < xpram_devs; i++)
- if (xpram_sizes[i] == 0)
- xpram_sizes[i] = mem_auto;
- }
- return 0;
-}
-
-static int __init xpram_setup_blkdev(void)
-{
- unsigned long offset;
- int i, rc = -ENOMEM;
-
- for (i = 0; i < xpram_devs; i++) {
- xpram_disks[i] = blk_alloc_disk(NUMA_NO_NODE);
- if (!xpram_disks[i])
- goto out;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_disks[i]->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM,
- xpram_disks[i]->queue);
- blk_queue_logical_block_size(xpram_disks[i]->queue, 4096);
- }
-
- /*
- * Register xpram major.
- */
- rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
- if (rc < 0)
- goto out;
-
- /*
- * Setup device structures.
- */
- offset = 0;
- for (i = 0; i < xpram_devs; i++) {
- struct gendisk *disk = xpram_disks[i];
-
- xpram_devices[i].size = xpram_sizes[i] / 4;
- xpram_devices[i].offset = offset;
- offset += xpram_devices[i].size;
- disk->major = XPRAM_MAJOR;
- disk->first_minor = i;
- disk->minors = 1;
- disk->fops = &xpram_devops;
- disk->private_data = &xpram_devices[i];
- sprintf(disk->disk_name, "slram%d", i);
- set_capacity(disk, xpram_sizes[i] << 1);
- add_disk(disk);
- }
-
- return 0;
-out:
- while (i--)
- blk_cleanup_disk(xpram_disks[i]);
- return rc;
-}
-
-/*
- * Finally, the init/exit functions.
- */
-static void __exit xpram_exit(void)
-{
- int i;
- for (i = 0; i < xpram_devs; i++) {
- del_gendisk(xpram_disks[i]);
- blk_cleanup_disk(xpram_disks[i]);
- }
- unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
-}
-
-static int __init xpram_init(void)
-{
- int rc;
-
- /* Find out size of expanded memory. */
- if (xpram_present() != 0) {
- pr_err("No expanded memory available\n");
- return -ENODEV;
- }
- xpram_pages = xpram_highest_page_index() + 1;
- pr_info(" %u pages expanded memory found (%lu KB).\n",
- xpram_pages, (unsigned long) xpram_pages*4);
- rc = xpram_setup_sizes(xpram_pages);
- if (rc)
- return rc;
- return xpram_setup_blkdev();
-}
-
-module_init(xpram_init);
-module_exit(xpram_exit);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 87cdbace1453..e4592890f20a 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -292,13 +292,15 @@ con3270_update(struct timer_list *t)
* Read tasklet.
*/
static void
-con3270_read_tasklet(struct raw3270_request *rrq)
+con3270_read_tasklet(unsigned long data)
{
static char kreset_data = TW_KR;
+ struct raw3270_request *rrq;
struct con3270 *cp;
unsigned long flags;
int nr_up, deactivate;
+ rrq = (struct raw3270_request *)data;
cp = (struct con3270 *) rrq->view;
spin_lock_irqsave(&cp->view.lock, flags);
nr_up = cp->nr_up;
@@ -625,8 +627,7 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
timer_setup(&condev->timer, con3270_update, 0);
- tasklet_init(&condev->readlet,
- (void (*)(unsigned long)) con3270_read_tasklet,
+ tasklet_init(&condev->readlet, con3270_read_tasklet,
(unsigned long) condev->read);
raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
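The con3270 change is about indirect-call type safety: the tasklet core
invokes its callback as void (*)(unsigned long), and calling a function
through a cast, mismatched pointer type is undefined behaviour that also
trips Control-Flow Integrity checking where enabled. The general pattern, as
a sketch with illustrative names (my_ctx, my_tasklet and my_handler are not
from the patch):

#include <linux/interrupt.h>

static struct my_ctx { int value; } ctx;
static struct tasklet_struct my_tasklet;

/* Prototype matches what tasklet_init() expects: no function cast. */
static void my_handler(unsigned long data)
{
	struct my_ctx *c = (struct my_ctx *)data;	/* cast the argument */

	c->value++;
}

static void my_setup(void)
{
	tasklet_init(&my_tasklet, my_handler, (unsigned long)&ctx);
}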
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index e1686a69a68e..6f2b64040078 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -34,12 +34,13 @@ void schedule_sysrq_work(struct sysrq_work *sw)
/**
- * Check for special chars at start of input.
+ * ctrlchar_handle - check for special chars at start of input
*
- * @param buf Console input buffer.
- * @param len Length of valid data in buffer.
- * @param tty The tty struct for this console.
- * @return CTRLCHAR_NONE, if nothing matched,
+ * @buf: console input buffer
+ * @len: length of valid data in buffer
+ * @tty: the tty struct for this console
+ *
+ * Return: CTRLCHAR_NONE, if nothing matched,
* CTRLCHAR_SYSRQ, if sysrq was encountered
* otherwise char to be inserted logically or'ed
* with CTRLCHAR_CTRL
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
index 37ee8f698c3b..02b6f394aec2 100644
--- a/drivers/s390/char/hmcdrv_ftp.c
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -26,7 +26,7 @@
* struct hmcdrv_ftp_ops - HMC drive FTP operations
* @startup: startup function
* @shutdown: shutdown function
- * @cmd: FTP transfer function
+ * @transfer: FTP transfer function
*/
struct hmcdrv_ftp_ops {
int (*startup)(void);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index b4b84e3e0949..2cf7fe131ece 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -28,7 +28,7 @@
#define SCLP_HEADER "sclp: "
struct sclp_trace_entry {
- char id[4];
+ char id[4] __nonstring;
u32 a;
u64 b;
};
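The __nonstring annotation added above comes from
include/linux/compiler_attributes.h and maps to GCC's
__attribute__((nonstring)): it documents that the array holds a fixed-size
byte tag rather than a NUL-terminated string, so filling all four bytes does
not draw string-truncation warnings. A minimal illustration (a sketch, not
driver code):

#include <linux/compiler_attributes.h>
#include <linux/string.h>

struct tag_entry {
	char id[4] __nonstring;		/* 4-byte tag, never NUL-terminated */
};

static void set_tag(struct tag_entry *e)
{
	memcpy(e->id, "SRV1", 4);	/* deliberately uses all four bytes */
}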
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 4dd2eb634856..f3c656975e05 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -262,7 +262,10 @@ static int blacklist_parse_proc_parameters(char *buf)
if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
- css_schedule_eval_all_unreg(0);
+ /* There could be subchannels without proper devices connected;
+ * evaluate all the entries.
+ */
+ css_schedule_eval_all();
} else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index adf33b653d87..8d14569823d7 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -867,19 +867,6 @@ out_err:
wake_up(&ccw_device_init_wq);
}
-static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
-{
- struct subchannel *sch;
-
- /* Get subchannel reference for local processing. */
- if (!get_device(cdev->dev.parent))
- return;
- sch = to_subchannel(cdev->dev.parent);
- css_sch_device_unregister(sch);
- /* Release subchannel reference for local processing. */
- put_device(&sch->dev);
-}
-
/*
* subchannel recognition done. Called from the state machine.
*/
@@ -1857,10 +1844,10 @@ static void ccw_device_todo(struct work_struct *work)
css_schedule_eval(sch->schid);
fallthrough;
case CDEV_TODO_UNREG:
- if (sch_is_pseudo_sch(sch))
- ccw_device_unregister(cdev);
- else
- ccw_device_call_sch_unregister(cdev);
+ spin_lock_irq(sch->lock);
+ sch_set_cdev(sch, NULL);
+ spin_unlock_irq(sch->lock);
+ ccw_device_unregister(cdev);
break;
default:
break;
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 740996d0dc8c..7835a87a60b5 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -91,7 +91,7 @@ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
}
/**
- * diag_get_dev_info - retrieve device information via diag 0x210
+ * diag210_get_dev_info - retrieve device information via diag 0x210
* @cdev: ccw device
*
* Returns zero on success, non-zero otherwise.
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index fa0cb8633040..356318746dd1 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -71,7 +71,7 @@ static LIST_HEAD(zcrypt_ops_list);
/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
-/**
+/*
* Process a rescan of the transport layer.
*
* Returns 1, if the rescan has been processed, otherwise 0.
@@ -462,7 +462,7 @@ static void zcdn_destroy_all(void)
#endif
-/**
+/*
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
*
* This function is not supported beyond zcrypt 1.3.1.
@@ -473,7 +473,7 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
return -EPERM;
}
-/**
+/*
* zcrypt_write(): Not allowed.
*
* Write is not allowed
@@ -484,7 +484,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
return -EPERM;
}
-/**
+/*
* zcrypt_open(): Count number of users.
*
* Device open function to count number of users.
@@ -512,7 +512,7 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
return stream_open(inode, filp);
}
-/**
+/*
* zcrypt_release(): Count number of users.
*
* Device close function to count number of users.
@@ -2153,7 +2153,7 @@ static void zcdn_exit(void)
#endif
-/**
+/*
* zcrypt_api_init(): Module initialization.
*
* The module initialization code.
@@ -2191,7 +2191,7 @@ out:
return rc;
}
-/**
+/*
* zcrypt_api_exit(): Module termination.
*
* The module termination code.
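The many '/**' to '/*' demotions in the zcrypt files (here and below) are
kernel-doc cleanups: '/**' is reserved for kernel-doc, and
scripts/kernel-doc warns when such a comment lacks the expected structure.
For contrast, a conforming header looks roughly like this
(zcrypt_example_op() is a hypothetical function, not from the patch):

#include <linux/device.h>

/**
 * zcrypt_example_op() - one-line summary of the operation
 * @dev: device being operated on
 * @flags: behaviour flags
 *
 * An optional longer description goes here.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int zcrypt_example_op(struct device *dev, unsigned long flags)
{
	return 0;
}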
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index fa8293d37006..2bd49950ba81 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -65,7 +65,7 @@ static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
-/**
+/*
* Probe function for CEX2A card devices. It always accepts the AP device
* since the bus_match already checked the card type.
* @ap_dev: pointer to the AP device.
@@ -124,7 +124,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2A card driver information
* if an AP card device is removed.
*/
@@ -142,7 +142,7 @@ static struct ap_driver zcrypt_cex2a_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
+/*
* Probe function for CEX2A queue devices. It always accepts the AP device
* since the bus_match already checked the queue type.
* @ap_dev: pointer to the AP device.
@@ -183,7 +183,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2A queue driver information
* if an AP queue device is removed.
*/
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index a0b9f1153e12..6360fdd06160 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -171,7 +171,7 @@ static const struct attribute_group cca_queue_attr_grp = {
.attrs = cca_queue_attrs,
};
-/**
+/*
* Large random number detection function. It sends a message to a CEX2C/CEX3C
* card to find out if large random numbers are supported.
* @ap_dev: pointer to the AP device.
@@ -237,7 +237,7 @@ out_free:
return rc;
}
-/**
+/*
* Probe function for CEX2C/CEX3C card devices. It always accepts the
* AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
@@ -303,7 +303,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2C/CEX3C card driver information
* if an AP card device is removed.
*/
@@ -325,7 +325,7 @@ static struct ap_driver zcrypt_cex2c_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
+/*
* Probe function for CEX2C/CEX3C queue devices. It always accepts the
* AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
@@ -376,7 +376,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2C/CEX3C queue driver information
* if an AP queue device is removed.
*/
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 1f7ec54142e1..06024bbe9a58 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -394,7 +394,7 @@ static const struct attribute_group ep11_queue_attr_grp = {
.attrs = ep11_queue_attrs,
};
-/**
+/*
* Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
@@ -562,7 +562,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver
* information if an AP card device is removed.
*/
@@ -586,7 +586,7 @@ static struct ap_driver zcrypt_cex4_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
+/*
* Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
@@ -652,7 +652,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver
* information if an AP queue device is removed.
*/
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 99405472824d..99937f3e1d49 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -39,7 +39,7 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-/**
+/*
* The type 50 message family is associated with a CEXxA cards.
*
* The four members of the family are described below.
@@ -136,7 +136,7 @@ struct type50_crb3_msg {
unsigned char message[512];
} __packed;
-/**
+/*
* The type 80 response family is associated with a CEXxA cards.
*
* Note that all unsigned char arrays are right-justified and left-padded
@@ -188,7 +188,7 @@ unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
return 0;
}
-/**
+/*
* Convert a ICAMEX message to a type50 MEX message.
*
* @zq: crypto queue pointer
@@ -255,7 +255,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Convert a ICACRT message to a type50 CRT message.
*
* @zq: crypto queue pointer
@@ -346,7 +346,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 80 reply message back to user space.
*
* @zq: crypto device pointer
@@ -418,7 +418,7 @@ static int convert_response_cex2a(struct zcrypt_queue *zq,
}
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -457,7 +457,7 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
-/**
+/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -502,7 +502,7 @@ out:
return rc;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -547,7 +547,7 @@ out:
return rc;
}
-/**
+/*
* The crypto operations for message type 50.
*/
static struct zcrypt_ops zcrypt_msgtype50_ops = {
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 752c6398fcd6..bc5a8c31ba73 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-/**
+/*
* CPRB
* Note that all shorts, ints and longs are little-endian.
* All pointer fields are 32-bits long, and mean nothing
@@ -107,7 +107,7 @@ struct function_and_rules_block {
unsigned char only_rule[8];
} __packed;
-/**
+/*
* The following is used to initialize the CPRBX passed to the CEXxC/CEXxP
* card in a type6 message. The 3 fields that must be filled in at execution
* time are req_parml, rpl_parml and usage_domain.
@@ -236,7 +236,7 @@ int speed_idx_ep11(int req_type)
}
-/**
+/*
* Convert a ICAMEX message to a type6 MEX message.
*
* @zq: crypto device pointer
@@ -305,7 +305,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Convert a ICACRT message to a type6 CRT message.
*
* @zq: crypto device pointer
@@ -374,7 +374,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Convert a XCRB message to a type6 CPRB message.
*
* @zq: crypto device pointer
@@ -571,7 +571,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
return 0;
}
-/**
+/*
* Copy results from a type 86 ICA reply message back to user space.
*
* @zq: crypto device pointer
@@ -697,7 +697,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 86 XCRB reply message back to user space.
*
* @zq: crypto device pointer
@@ -728,7 +728,7 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 86 EP11 XCRB reply message back to user space.
*
* @zq: crypto device pointer
@@ -911,7 +911,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
}
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -966,7 +966,7 @@ out:
complete(&(resp_type->work));
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -1015,7 +1015,7 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1063,7 +1063,7 @@ out_free:
return rc;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1112,7 +1112,7 @@ out_free:
return rc;
}
-/**
+/*
* Fetch function code from cprb.
* Extracting the fc requires copying the cprb from userspace.
* So this function allocates memory and needs an ap_msg prepared
@@ -1140,7 +1140,7 @@ unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a send_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1170,7 +1170,7 @@ out:
return rc;
}
-/**
+/*
* Fetch function code from ep11 cprb.
* Extracting the fc requires copying the ep11 cprb from userspace.
* So this function allocates memory and needs an ap_msg prepared
@@ -1198,7 +1198,7 @@ unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code);
}
-/**
+/*
* The request distributor calls this function if it picked the CEX4P
* device to handle a send_ep11_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1228,7 +1228,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
} __packed * payload_hdr = NULL;
- /**
+ /*
* The target domain field within the cprb body/payload block will be
* replaced by the usage domain for non-management commands only.
* Therefore we check the first bit of the 'flags' parameter for
@@ -1299,7 +1299,7 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
return 0;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to generate random data.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1339,7 +1339,7 @@ out:
return rc;
}
-/**
+/*
* The crypto operations for a CEXxC card.
*/
static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index ca473b368905..cbc3b62cd9e5 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -766,7 +766,7 @@ static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
}
/**
- * zfcp_adapter_debug_register - registers debug feature for an adapter
+ * zfcp_dbf_adapter_register - registers debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be registered
* return: -ENOMEM on error, 0 otherwise
*/
@@ -824,7 +824,7 @@ err_out:
}
/**
- * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
+ * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be unregistered
*/
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 6da8f6d05d39..c1f979296c1a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2275,7 +2275,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
}
/**
- * zfcp_fsf_close_LUN - close LUN
+ * zfcp_fsf_close_lun - close LUN
* @erp_action: pointer to erp_action triggering the "close LUN"
* Returns: 0 on success, error otherwise
*/
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 8f19bed6384e..6a2720105138 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -384,7 +384,7 @@ free_req_q:
}
/**
- * zfcp_close_qdio - close qdio queues for an adapter
+ * zfcp_qdio_close - close qdio queues for an adapter
* @qdio: pointer to structure zfcp_qdio
*/
void zfcp_qdio_close(struct zfcp_qdio *qdio)
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 59333f0257a8..60f2a04f0869 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -111,9 +111,9 @@ static void zfcp_unit_release(struct device *dev)
}
/**
- * zfcp_unit_enqueue - enqueue unit to unit list of a port.
+ * zfcp_unit_add - add unit to unit list of a port.
* @port: pointer to port where unit is added
- * @fcp_lun: FCP LUN of unit to be enqueued
+ * @fcp_lun: FCP LUN of unit to be added
* Returns: 0 success
*
* Sets up some unit internal structures and creates sysfs entry.
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 2f1894588e0b..b2730e859df8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -1629,8 +1629,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
struct device *dev = &cfg->dev->dev;
struct pci_dev *pdev = cfg->dev;
- int rc = 0;
- int ro_start, ro_size, i, j, k;
+ int i, k, rc = 0;
+ unsigned int kw_size;
ssize_t vpd_size;
char vpd_data[CXLFLASH_VPD_LEN];
char tmp_buf[WWPN_BUF_LEN] = { 0 };
@@ -1648,24 +1648,6 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
goto out;
}
- /* Get the read only section offset */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (unlikely(ro_start < 0)) {
- dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
- rc = -ENODEV;
- goto out;
- }
-
- /* Get the read only section size, cap when extends beyond read VPD */
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (unlikely((i + j) > vpd_size)) {
- dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
- __func__, (i + j), vpd_size);
- ro_size = vpd_size - i;
- }
-
/*
* Find the offset of the WWPN tag within the read only
* VPD data and validate the found field (partials are
@@ -1681,11 +1663,9 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
* ports programmed and operate in an undefined state.
*/
for (k = 0; k < cfg->num_fc_ports; k++) {
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
-
- i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
- if (i < 0) {
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ wwpn_vpd_tags[k], &kw_size);
+ if (i == -ENOENT) {
if (wwpn_vpd_required)
dev_err(dev, "%s: Port %d WWPN not found\n",
__func__, k);
@@ -1693,9 +1673,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
continue;
}
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
+ if (i < 0 || kw_size != WWPN_LEN) {
dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
__func__, k);
rc = -ENODEV;
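The cxlflash rework leans on the newer PCI VPD helper, which locates a
read-only keyword and reports its payload length in one call, replacing the
deleted tag/offset bookkeeping. Its contract as exercised above (a sketch;
the keyword "V5" stands in for wwpn_vpd_tags[k]):

	unsigned int kw_size;
	int off = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					       "V5", &kw_size);

	if (off == -ENOENT) {
		/* keyword absent from the read-only section */
	} else if (off < 0) {
		/* malformed or truncated VPD */
	} else if (kw_size == WWPN_LEN) {
		/* off already indexes the keyword's data, no header skip */
		memcpy(tmp_buf, &vpd_data[off], WWPN_LEN);
	}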
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 5a86cffd78f6..4310cb342a9f 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -18,10 +18,10 @@
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/thermal.h>
+#include <linux/units.h>
#include <trace/events/thermal.h>
-#define HZ_PER_KHZ 1000
#define SCALE_ERROR_MITIGATION 100
/**
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 823354a1a91a..19926beeb3b7 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -108,9 +108,12 @@ static struct attribute *imok_attr[] = {
NULL
};
+static const struct attribute_group imok_attribute_group = {
+ .attrs = imok_attr,
+};
+
static const struct attribute_group data_attribute_group = {
.bin_attrs = data_attributes,
- .attrs = imok_attr,
};
static ssize_t available_uuids_show(struct device *dev,
@@ -522,6 +525,12 @@ static int int3400_thermal_probe(struct platform_device *pdev)
if (result)
goto free_rel_misc;
+ if (acpi_has_method(priv->adev->handle, "IMOK")) {
+ result = sysfs_create_group(&pdev->dev.kobj, &imok_attribute_group);
+ if (result)
+ goto free_imok;
+ }
+
if (priv->data_vault) {
result = sysfs_create_group(&pdev->dev.kobj,
&data_attribute_group);
@@ -545,6 +554,8 @@ free_sysfs:
}
free_uuid:
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
+free_imok:
+ sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
free_rel_misc:
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
@@ -573,6 +584,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (priv->data_vault)
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
+ sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
thermal_zone_device_unregister(priv->thermal);
kfree(priv->data_vault);
kfree(priv->trts);
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index b0eb5ece9243..a5b58ea89cc6 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -528,7 +528,7 @@ static int start_power_clamp(void)
set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
/* prevent cpu hotplug */
- get_online_cpus();
+ cpus_read_lock();
/* prefer BSP */
control_cpu = 0;
@@ -542,7 +542,7 @@ static int start_power_clamp(void)
for_each_online_cpu(cpu) {
start_power_clamp_worker(cpu);
}
- put_online_cpus();
+ cpus_read_unlock();
return 0;
}
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index 8ec10d55d421..cd80c7db4073 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -79,6 +79,8 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
{}
};
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 8d5ac2df26dc..7d942f71e532 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -31,3 +31,13 @@ config QCOM_SPMI_TEMP_ALARM
trip points. The temperature reported by the thermal sensor reflects the
real time die temperature if an ADC is present or an estimate of the
temperature based upon the over temperature stage value.
+
+config QCOM_LMH
+ tristate "Qualcomm Limits Management Hardware"
+ depends on ARCH_QCOM
+ help
+ This enables initialization of the Qualcomm Limits Management
+ Hardware (LMh). LMh allows hardware-enforced mitigation for CPUs based on
+ input from temperature and current sensors. On many newer Qualcomm SoCs
+ LMh is configured in the firmware and this feature need not be enabled.
+ However, on certain SoCs like sdm845, LMh has to be configured from the kernel.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 252ea7d9da0b..0fa2512042e7 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -5,3 +5,4 @@ qcom_tsens-y += tsens.o tsens-v2.o tsens-v1.o tsens-v0_1.o \
tsens-8960.o
obj-$(CONFIG_QCOM_SPMI_ADC_TM5) += qcom-spmi-adc-tm5.o
obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o
+obj-$(CONFIG_QCOM_LMH) += lmh.o
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
new file mode 100644
index 000000000000..eafa7526eb8b
--- /dev/null
+++ b/drivers/thermal/qcom/lmh.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2021, Linaro Limited. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/qcom_scm.h>
+
+#define LMH_NODE_DCVS 0x44435653
+#define LMH_CLUSTER0_NODE_ID 0x6370302D
+#define LMH_CLUSTER1_NODE_ID 0x6370312D
+
+#define LMH_SUB_FN_THERMAL 0x54484D4C
+#define LMH_SUB_FN_CRNT 0x43524E54
+#define LMH_SUB_FN_REL 0x52454C00
+#define LMH_SUB_FN_BCL 0x42434C00
+
+#define LMH_ALGO_MODE_ENABLE 0x454E424C
+#define LMH_TH_HI_THRESHOLD 0x48494748
+#define LMH_TH_LOW_THRESHOLD 0x4C4F5700
+#define LMH_TH_ARM_THRESHOLD 0x41524D00
+
+#define LMH_REG_DCVS_INTR_CLR 0x8
+
+struct lmh_hw_data {
+ void __iomem *base;
+ struct irq_domain *domain;
+ int irq;
+};
+
+static irqreturn_t lmh_handle_irq(int hw_irq, void *data)
+{
+ struct lmh_hw_data *lmh_data = data;
+ int irq = irq_find_mapping(lmh_data->domain, 0);
+
+ /* Call the cpufreq driver to handle the interrupt */
+ if (irq)
+ generic_handle_irq(irq);
+
+ return 0;
+}
+
+static void lmh_enable_interrupt(struct irq_data *d)
+{
+ struct lmh_hw_data *lmh_data = irq_data_get_irq_chip_data(d);
+
+ /* Clear the existing interrupt */
+ writel(0xff, lmh_data->base + LMH_REG_DCVS_INTR_CLR);
+ enable_irq(lmh_data->irq);
+}
+
+static void lmh_disable_interrupt(struct irq_data *d)
+{
+ struct lmh_hw_data *lmh_data = irq_data_get_irq_chip_data(d);
+
+ disable_irq_nosync(lmh_data->irq);
+}
+
+static struct irq_chip lmh_irq_chip = {
+ .name = "lmh",
+ .irq_enable = lmh_enable_interrupt,
+ .irq_disable = lmh_disable_interrupt
+};
+
+static int lmh_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct lmh_hw_data *lmh_data = d->host_data;
+
+ irq_set_chip_and_handler(irq, &lmh_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, lmh_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops lmh_irq_ops = {
+ .map = lmh_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int lmh_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *cpu_node;
+ struct lmh_hw_data *lmh_data;
+ int temp_low, temp_high, temp_arm, cpu_id, ret;
+ u32 node_id;
+
+ lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
+ if (!lmh_data)
+ return -ENOMEM;
+
+ lmh_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lmh_data->base))
+ return PTR_ERR(lmh_data->base);
+
+ cpu_node = of_parse_phandle(np, "cpus", 0);
+ if (!cpu_node)
+ return -EINVAL;
+ cpu_id = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
+
+ ret = of_property_read_u32(np, "qcom,lmh-temp-high-millicelsius", &temp_high);
+ if (ret) {
+ dev_err(dev, "missing qcom,lmh-temp-high-millicelsius property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,lmh-temp-low-millicelsius", &temp_low);
+ if (ret) {
+ dev_err(dev, "missing qcom,lmh-temp-low-millicelsius property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,lmh-temp-arm-millicelsius", &temp_arm);
+ if (ret) {
+ dev_err(dev, "missing qcom,lmh-temp-arm-millicelsius property\n");
+ return ret;
+ }
+
+ /*
+ * Only sdm845 has LMh hardware currently enabled from the HLOS. If this is needed
+ * for other platforms, revisit this to check whether the <cpu-id, node-id> mapping
+ * should be part of a DT match table.
+ */
+ if (cpu_id == 0) {
+ node_id = LMH_CLUSTER0_NODE_ID;
+ } else if (cpu_id == 4) {
+ node_id = LMH_CLUSTER1_NODE_ID;
+ } else {
+ dev_err(dev, "Wrong CPU id associated with LMh node\n");
+ return -EINVAL;
+ }
+
+ if (!qcom_scm_lmh_dcvsh_available())
+ return -EINVAL;
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling current subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling reliability subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling BCL subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error %d enabling thermal subfunction\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_profile_change(0x1);
+ if (ret) {
+ dev_err(dev, "Error %d changing profile\n", ret);
+ return ret;
+ }
+
+ /* Set default thermal trips */
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_ARM_THRESHOLD, temp_arm,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error setting thermal ARM threshold%d\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_HI_THRESHOLD, temp_high,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error setting thermal HI threshold%d\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_LOW_THRESHOLD, temp_low,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error setting thermal ARM threshold%d\n", ret);
+ return ret;
+ }
+
+ lmh_data->irq = platform_get_irq(pdev, 0);
+ lmh_data->domain = irq_domain_add_linear(np, 1, &lmh_irq_ops, lmh_data);
+ if (!lmh_data->domain) {
+ dev_err(dev, "Error adding irq_domain\n");
+ return -EINVAL;
+ }
+
+ /* Disable the irq and let cpufreq enable it when ready to handle the interrupt */
+ irq_set_status_flags(lmh_data->irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(dev, lmh_data->irq, lmh_handle_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "lmh-irq", lmh_data);
+ if (ret) {
+ dev_err(dev, "Error %d registering irq %x\n", ret, lmh_data->irq);
+ irq_domain_remove(lmh_data->domain);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id lmh_table[] = {
+ { .compatible = "qcom,sdm845-lmh", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lmh_table);
+
+static struct platform_driver lmh_driver = {
+ .probe = lmh_probe,
+ .driver = {
+ .name = "qcom-lmh",
+ .of_match_table = lmh_table,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(lmh_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM LMh driver");
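The driver above deliberately exposes a one-interrupt irq_domain rather than
acting on the DCVS interrupt itself: lmh_handle_irq() forwards the hardware
interrupt, and IRQ_NOAUTOEN keeps it off until the consumer is ready, at
which point lmh_enable_interrupt() clears the pending status and re-enables
the parent line. A consumer-side sketch (an illustration with hypothetical
names; throttle_isr and consumer_probe are not from this patch):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t throttle_isr(int irq, void *data)
{
	/* re-evaluate hardware-throttled frequency limits here */
	return IRQ_HANDLED;
}

static int consumer_probe(struct platform_device *pdev)
{
	/* resolved through lmh_irq_ops via the DT interrupt parent */
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* requesting the irq ends up in lmh_enable_interrupt() */
	return devm_request_threaded_irq(&pdev->dev, irq, NULL, throttle_isr,
					 IRQF_ONESHOT, "dcvsh-irq", pdev);
}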
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index 232fd0b33325..8494cc04aa21 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -359,6 +359,12 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
&adc_tm->channels[i],
&adc_tm5_ops);
if (IS_ERR(tzd)) {
+ if (PTR_ERR(tzd) == -ENODEV) {
+ dev_warn(adc_tm->dev, "thermal sensor on channel %d is not used\n",
+ adc_tm->channels[i].channel);
+ continue;
+ }
+
dev_err(adc_tm->dev, "Error registering TZ zone for channel %d: %ld\n",
adc_tm->channels[i].channel, PTR_ERR(tzd));
return PTR_ERR(tzd);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index fdf16aa34eb4..85228d308dd3 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -84,7 +84,7 @@ struct rcar_gen3_thermal_tsc {
struct thermal_zone_device *zone;
struct equation_coefs coef;
int tj_t;
- int id; /* thermal channel id */
+ unsigned int id; /* thermal channel id */
};
struct rcar_gen3_thermal_priv {
@@ -190,10 +190,64 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
+static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc,
+ int mcelsius)
+{
+ int celsius, val;
+
+ celsius = DIV_ROUND_CLOSEST(mcelsius, 1000);
+ if (celsius <= INT_FIXPT(tsc->tj_t))
+ val = celsius * tsc->coef.a1 + tsc->coef.b1;
+ else
+ val = celsius * tsc->coef.a2 + tsc->coef.b2;
+
+ return INT_FIXPT(val);
+}
+
+static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
+{
+ struct rcar_gen3_thermal_tsc *tsc = devdata;
+ u32 irqmsk = 0;
+
+ if (low != -INT_MAX) {
+ irqmsk |= IRQ_TEMPD1;
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP1,
+ rcar_gen3_thermal_mcelsius_to_temp(tsc, low));
+ }
+
+ if (high != INT_MAX) {
+ irqmsk |= IRQ_TEMP2;
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP2,
+ rcar_gen3_thermal_mcelsius_to_temp(tsc, high));
+ }
+
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, irqmsk);
+
+ return 0;
+}
+
+static struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
.get_temp = rcar_gen3_thermal_get_temp,
+ .set_trips = rcar_gen3_thermal_set_trips,
};
+static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
+{
+ struct rcar_gen3_thermal_priv *priv = data;
+ unsigned int i;
+ u32 status;
+
+ for (i = 0; i < priv->num_tscs; i++) {
+ status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
+ rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
+ if (status)
+ thermal_zone_device_update(priv->tscs[i]->zone,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ return IRQ_HANDLED;
+}
+
static const struct soc_device_attribute r8a7795es1[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ /* sentinel */ }
@@ -210,6 +264,9 @@ static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_tsc *tsc)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
+ if (tsc->zone->ops->set_trips)
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN,
+ IRQ_TEMPD1 | IRQ_TEMP2);
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR,
CTSR_PONM | CTSR_AOUT | CTSR_THBGR | CTSR_VMEN);
@@ -235,6 +292,9 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
+ if (tsc->zone->ops->set_trips)
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN,
+ IRQ_TEMPD1 | IRQ_TEMP2);
reg_val = rcar_gen3_thermal_read(tsc, REG_GEN3_THCTR);
reg_val |= THCTR_THSST;
@@ -303,6 +363,34 @@ static void rcar_gen3_hwmon_action(void *data)
thermal_remove_hwmon_sysfs(zone);
}
+static int rcar_gen3_thermal_request_irqs(struct rcar_gen3_thermal_priv *priv,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int i;
+ char *irqname;
+ int ret, irq;
+
+ for (i = 0; i < 2; i++) {
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s:ch%d",
+ dev_name(dev), i);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rcar_gen3_thermal_irq,
+ IRQF_ONESHOT, irqname, priv);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int rcar_gen3_thermal_probe(struct platform_device *pdev)
{
struct rcar_gen3_thermal_priv *priv;
@@ -310,7 +398,8 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
const int *ths_tj_1 = of_device_get_match_data(dev);
struct resource *res;
struct thermal_zone_device *zone;
- int ret, i;
+ unsigned int i;
+ int ret;
/* default values if FUSEs are missing */
/* TODO: Read values from hardware on supported platforms */
@@ -326,6 +415,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ if (rcar_gen3_thermal_request_irqs(priv, pdev))
+ rcar_gen3_tz_of_ops.set_trips = NULL;
+
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -351,9 +443,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
priv->tscs[i] = tsc;
- priv->thermal_init(tsc);
- rcar_gen3_thermal_calc_coefs(tsc, ptat, thcodes[i], *ths_tj_1);
-
zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
&rcar_gen3_tz_of_ops);
if (IS_ERR(zone)) {
@@ -363,6 +452,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
}
tsc->zone = zone;
+ priv->thermal_init(tsc);
+ rcar_gen3_thermal_calc_coefs(tsc, ptat, thcodes[i], *ths_tj_1);
+
tsc->zone->tzp->no_hwmon = false;
ret = thermal_add_hwmon_sysfs(tsc->zone);
if (ret)
@@ -376,7 +468,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (ret < 0)
goto error_unregister;
- dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret);
+ dev_info(dev, "TSC%u: Loaded %d trip points\n", i, ret);
}
priv->num_tscs = i;
@@ -401,8 +493,12 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
+ struct thermal_zone_device *zone = tsc->zone;
priv->thermal_init(tsc);
+ if (zone->ops->set_trips)
+ rcar_gen3_thermal_set_trips(tsc, zone->prev_low_trip,
+ zone->prev_high_trip);
}
return 0;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index e9a90bc23b11..f4ab4c5b4b62 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1073,6 +1073,7 @@ static int exynos_tmu_probe(struct platform_device *pdev)
data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
if (IS_ERR(data->sclk)) {
dev_err(&pdev->dev, "Failed to get sclk\n");
+ ret = PTR_ERR(data->sclk);
goto err_clk;
} else {
ret = clk_prepare_enable(data->sclk);
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
index 46c2215867cd..cfa41d87a794 100644
--- a/drivers/thermal/tegra/Kconfig
+++ b/drivers/thermal/tegra/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "NVIDIA Tegra thermal drivers"
-depends on ARCH_TEGRA
+depends on ARCH_TEGRA || COMPILE_TEST
config TEGRA_SOCTHERM
tristate "Tegra SOCTHERM thermal management"
@@ -18,4 +18,11 @@ config TEGRA_BPMP_THERMAL
Enable this option to support sensing the system temperature of NVIDIA
Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
+config TEGRA30_TSENSOR
+ tristate "Tegra30 Thermal Sensor"
+ depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+ help
+ Enable this option to support thermal management of NVIDIA Tegra30
+ system-on-chip.
+
endmenu
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile
index 0f2b66edf0d2..eb27d194c583 100644
--- a/drivers/thermal/tegra/Makefile
+++ b/drivers/thermal/tegra/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o
obj-$(CONFIG_TEGRA_BPMP_THERMAL) += tegra-bpmp-thermal.o
+obj-$(CONFIG_TEGRA30_TSENSOR) += tegra30-tsensor.o
tegra-soctherm-y := soctherm.o soctherm-fuse.o
tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 8e303e9d1dc0..210325f92559 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -450,8 +450,8 @@ static int enforce_temp_range(struct device *dev, int trip_temp)
temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
if (temp != trip_temp)
- dev_info(dev, "soctherm: trip temperature %d forced to %d\n",
- trip_temp, temp);
+ dev_dbg(dev, "soctherm: trip temperature %d forced to %d\n",
+ trip_temp, temp);
return temp;
}
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
new file mode 100644
index 000000000000..9b6b693cbcf8
--- /dev/null
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tegra30 SoC Thermal Sensor driver
+ *
+ * Based on downstream HWMON driver from NVIDIA.
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2021 GRATE-DRIVER project
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/types.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "../thermal_core.h"
+#include "../thermal_hwmon.h"
+
+#define TSENSOR_SENSOR0_CONFIG0 0x0
+#define TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP BIT(0)
+#define TSENSOR_SENSOR0_CONFIG0_HW_FREQ_DIV_EN BIT(1)
+#define TSENSOR_SENSOR0_CONFIG0_THERMAL_RST_EN BIT(2)
+#define TSENSOR_SENSOR0_CONFIG0_DVFS_EN BIT(3)
+#define TSENSOR_SENSOR0_CONFIG0_INTR_OVERFLOW_EN BIT(4)
+#define TSENSOR_SENSOR0_CONFIG0_INTR_HW_FREQ_DIV_EN BIT(5)
+#define TSENSOR_SENSOR0_CONFIG0_INTR_THERMAL_RST_EN BIT(6)
+#define TSENSOR_SENSOR0_CONFIG0_M GENMASK(23, 8)
+#define TSENSOR_SENSOR0_CONFIG0_N GENMASK(31, 24)
+
+#define TSENSOR_SENSOR0_CONFIG1 0x8
+#define TSENSOR_SENSOR0_CONFIG1_TH1 GENMASK(15, 0)
+#define TSENSOR_SENSOR0_CONFIG1_TH2 GENMASK(31, 16)
+
+#define TSENSOR_SENSOR0_CONFIG2 0xc
+#define TSENSOR_SENSOR0_CONFIG2_TH3 GENMASK(15, 0)
+
+#define TSENSOR_SENSOR0_STATUS0 0x18
+#define TSENSOR_SENSOR0_STATUS0_STATE GENMASK(2, 0)
+#define TSENSOR_SENSOR0_STATUS0_INTR BIT(8)
+#define TSENSOR_SENSOR0_STATUS0_CURRENT_VALID BIT(9)
+
+#define TSENSOR_SENSOR0_TS_STATUS1 0x1c
+#define TSENSOR_SENSOR0_TS_STATUS1_CURRENT_COUNT GENMASK(31, 16)
+
+#define TEGRA30_FUSE_TEST_PROG_VER 0x28
+
+#define TEGRA30_FUSE_TSENSOR_CALIB 0x98
+#define TEGRA30_FUSE_TSENSOR_CALIB_LOW GENMASK(15, 0)
+#define TEGRA30_FUSE_TSENSOR_CALIB_HIGH GENMASK(31, 16)
+
+#define TEGRA30_FUSE_SPARE_BIT 0x144
+
+struct tegra_tsensor;
+
+struct tegra_tsensor_calibration_data {
+ int a, b, m, n, p, r;
+};
+
+struct tegra_tsensor_channel {
+ void __iomem *regs;
+ unsigned int id;
+ struct tegra_tsensor *ts;
+ struct thermal_zone_device *tzd;
+};
+
+struct tegra_tsensor {
+ void __iomem *regs;
+ bool swap_channels;
+ struct clk *clk;
+ struct device *dev;
+ struct reset_control *rst;
+ struct tegra_tsensor_channel ch[2];
+ struct tegra_tsensor_calibration_data calib;
+};
+
+static int tegra_tsensor_hw_enable(const struct tegra_tsensor *ts)
+{
+ u32 val;
+ int err;
+
+ err = reset_control_assert(ts->rst);
+ if (err) {
+ dev_err(ts->dev, "failed to assert hardware reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(ts->clk);
+ if (err) {
+ dev_err(ts->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ fsleep(1000);
+
+ err = reset_control_deassert(ts->rst);
+ if (err) {
+ dev_err(ts->dev, "failed to deassert hardware reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ /*
+ * Sensors are enabled after reset by default, but do not start
+ * gauging until the clock counter is programmed.
+ *
+ * M: number of reference clock pulses after which every
+ * temperature / voltage measurement is made
+ *
+ * N: number of reference clock counts for which the counter runs
+ */
+ val = FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_M, 12500);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_N, 255);
+
+ /* apply the same configuration to both channels */
+ writel_relaxed(val, ts->regs + 0x40 + TSENSOR_SENSOR0_CONFIG0);
+ writel_relaxed(val, ts->regs + 0x80 + TSENSOR_SENSOR0_CONFIG0);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(ts->clk);
+
+ return err;
+}
+
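A hedged back-of-the-envelope reading of the M/N programming above. The reference clock rate is an assumption (the real rate comes from the clk framework); 600 kHz is picked only because it would line up with the ~21 ms poll interval used by tegra_tsensor_get_temp() below:

#include <stdio.h>

int main(void)
{
	unsigned long long rate_hz = 600000;	/* assumed reference clock */
	unsigned int m = 12500, n = 255;	/* values programmed above */

	/* one measurement is made every M reference clock pulses */
	unsigned long long period_us = m * 1000000ULL / rate_hz;

	printf("M=%u N=%u -> ~%llu us between measurements\n", m, n, period_us);
	return 0;
}
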
+static int tegra_tsensor_hw_disable(const struct tegra_tsensor *ts)
+{
+ int err;
+
+ err = reset_control_assert(ts->rst);
+ if (err) {
+ dev_err(ts->dev, "failed to assert hardware reset: %d\n", err);
+ return err;
+ }
+
+ clk_disable_unprepare(ts->clk);
+
+ return 0;
+}
+
+static void devm_tegra_tsensor_hw_disable(void *data)
+{
+ const struct tegra_tsensor *ts = data;
+
+ tegra_tsensor_hw_disable(ts);
+}
+
+static int tegra_tsensor_get_temp(void *data, int *temp)
+{
+ const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor *ts = tsc->ts;
+ int err, c1, c2, c3, c4, counter;
+ u32 val;
+
+ /*
+ * The counter will be invalid if the hardware is misprogrammed or if
+ * not enough time has passed since the sensor was enabled.
+ */
+ err = readl_relaxed_poll_timeout(tsc->regs + TSENSOR_SENSOR0_STATUS0, val,
+ val & TSENSOR_SENSOR0_STATUS0_CURRENT_VALID,
+ 21 * USEC_PER_MSEC,
+ 21 * USEC_PER_MSEC * 50);
+ if (err) {
+ dev_err_once(ts->dev, "ch%u: counter invalid\n", tsc->id);
+ return err;
+ }
+
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_TS_STATUS1);
+ counter = FIELD_GET(TSENSOR_SENSOR0_TS_STATUS1_CURRENT_COUNT, val);
+
+ /*
+ * This shouldn't happen with a valid counter status; nevertheless,
+ * let's verify the value since it lives in a register separate from
+ * the status.
+ */
+ if (counter == 0xffff) {
+ dev_err_once(ts->dev, "ch%u: counter overflow\n", tsc->id);
+ return -EINVAL;
+ }
+
+ /*
+ * t = a * counter + b
+ * temperature = m * t^2 + n * t + p
+ */
+ c1 = DIV_ROUND_CLOSEST(ts->calib.a * counter + ts->calib.b, 1000000);
+ c1 = c1 ?: 1;
+ c2 = DIV_ROUND_CLOSEST(ts->calib.p, c1);
+ c3 = c1 * ts->calib.m;
+ c4 = ts->calib.n;
+
+ *temp = DIV_ROUND_CLOSEST(c1 * (c2 + c3 + c4), 1000);
+
+ return 0;
+}
+
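The c1..c4 factoring above is the quadratic from the comment, evaluated in millidegrees. A standalone sketch with invented inputs (only m/n/p mirror the non-A01 constants that appear later in this patch; a, b and the counter are made up):

#include <stdio.h>

/* round-to-closest division, like the kernel's DIV_ROUND_CLOSEST() */
static long div_round_closest(long a, long b)
{
	return (a + (a < 0 ? -b / 2 : b / 2)) / b;
}

int main(void)
{
	long a = 15000, b = -3500000;		/* invented linear fit, x1e6 */
	long m = -3512, n = 1528943, p = -11100000; /* quadratic correction */
	long counter = 3000;			/* invented raw count */

	long t = div_round_closest(a * counter + b, 1000000);	/* ~Celsius */
	long temp_mc = div_round_closest(m * t * t + n * t + p, 1000);

	/* close to the driver's factored c1*(c2 + c3 + c4) form, which
	 * rounds p/c1 first */
	printf("counter %ld -> t=%ldC -> %ld mC\n", counter, t, temp_mc);
	return 0;
}
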
+static int tegra_tsensor_temp_to_counter(const struct tegra_tsensor *ts, int temp)
+{
+ int c1, c2;
+
+ c1 = DIV_ROUND_CLOSEST(ts->calib.p - temp * 1000, ts->calib.m);
+ c2 = -ts->calib.r - int_sqrt(ts->calib.r * ts->calib.r - c1);
+
+ return DIV_ROUND_CLOSEST(c2 * 1000000 - ts->calib.b, ts->calib.a);
+}
+
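For reference, the inverse above is the quadratic from tegra_tsensor_get_temp() solved by completing the square (a reading of the code; the coefficients come from the calibration set up later in this file):

	temp * 1000 = m*t^2 + n*t + p
	=>  t^2 + (n/m)*t + (p - temp*1000)/m = 0
	=>  t = -r - sqrt(r^2 - c1),  where r = n/(2*m), c1 = (p - temp*1000)/m

taking the root that lands in the sensor's operating range; the counter then falls out of the linear fit as counter = (t*1000000 - b) / a, which is exactly the final DIV_ROUND_CLOSEST above.
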
+static int tegra_tsensor_set_trips(void *data, int low, int high)
+{
+ const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor *ts = tsc->ts;
+ u32 val;
+
+ /*
+ * TSENSOR doesn't trigger an interrupt on the "low" temperature breach,
+ * hence bail out if the high temperature is unspecified.
+ */
+ if (high == INT_MAX)
+ return 0;
+
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+ val &= ~TSENSOR_SENSOR0_CONFIG1_TH1;
+
+ high = tegra_tsensor_temp_to_counter(ts, high);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH1, high);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops ops = {
+ .get_temp = tegra_tsensor_get_temp,
+ .set_trips = tegra_tsensor_set_trips,
+};
+
+static bool
+tegra_tsensor_handle_channel_interrupt(const struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ const struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ u32 val;
+
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_STATUS0);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_STATUS0);
+
+ if (FIELD_GET(TSENSOR_SENSOR0_STATUS0_STATE, val) == 5)
+ dev_err_ratelimited(ts->dev, "ch%u: counter overflowed\n", id);
+
+ if (!FIELD_GET(TSENSOR_SENSOR0_STATUS0_INTR, val))
+ return false;
+
+ thermal_zone_device_update(tsc->tzd, THERMAL_EVENT_UNSPECIFIED);
+
+ return true;
+}
+
+static irqreturn_t tegra_tsensor_isr(int irq, void *data)
+{
+ const struct tegra_tsensor *ts = data;
+ bool handled = false;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++)
+ handled |= tegra_tsensor_handle_channel_interrupt(ts, i);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int tegra_tsensor_disable_hw_channel(const struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ const struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ struct thermal_zone_device *tzd = tsc->tzd;
+ u32 val;
+ int err;
+
+ if (!tzd)
+ goto stop_channel;
+
+ err = thermal_zone_device_disable(tzd);
+ if (err) {
+ dev_err(ts->dev, "ch%u: failed to disable zone: %d\n", id, err);
+ return err;
+ }
+
+stop_channel:
+ /* stop channel gracefully */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP, 1);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+
+ return 0;
+}
+
+static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd,
+ int *hot_trip, int *crit_trip)
+{
+ unsigned int i;
+
+ /*
+ * 90C is the maximum critical temperature across all Tegra30 SoC variants;
+ * use it for the default trip if unspecified in a device-tree.
+ */
+ *hot_trip = 85000;
+ *crit_trip = 90000;
+
+ for (i = 0; i < tzd->trips; i++) {
+ enum thermal_trip_type type;
+ int trip_temp;
+
+ tzd->ops->get_trip_temp(tzd, i, &trip_temp);
+ tzd->ops->get_trip_type(tzd, i, &type);
+
+ if (type == THERMAL_TRIP_HOT)
+ *hot_trip = trip_temp;
+
+ if (type == THERMAL_TRIP_CRITICAL)
+ *crit_trip = trip_temp;
+ }
+
+ /* clamp hardware trips to the calibration limits */
+ *hot_trip = clamp(*hot_trip, 25000, 90000);
+
+ /*
+ * The kernel performs a normal system shutdown once it sees
+ * that the critical temperature is breached, hence set the
+ * hardware limit 5C higher to allow the system to shut down
+ * gracefully before the signal is sent to the Power
+ * Management controller.
+ */
+ *crit_trip = clamp(*crit_trip + 5000, 25000, 90000);
+}
+
+static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ const struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ struct thermal_zone_device *tzd = tsc->tzd;
+ int err, hot_trip = 0, crit_trip = 0;
+ u32 val;
+
+ if (!tzd) {
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+ val &= ~TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP;
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+
+ return 0;
+ }
+
+ tegra_tsensor_get_hw_channel_trips(tzd, &hot_trip, &crit_trip);
+
+ /* prevent potential racing with tegra_tsensor_set_trips() */
+ mutex_lock(&tzd->lock);
+
+ dev_info_once(ts->dev, "ch%u: PMC emergency shutdown trip set to %dC\n",
+ id, DIV_ROUND_CLOSEST(crit_trip, 1000));
+
+ hot_trip = tegra_tsensor_temp_to_counter(ts, hot_trip);
+ crit_trip = tegra_tsensor_temp_to_counter(ts, crit_trip);
+
+ /* program LEVEL2 counter threshold */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+ val &= ~TSENSOR_SENSOR0_CONFIG1_TH2;
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH2, hot_trip);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+
+ /* program LEVEL3 counter threshold */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG2);
+ val &= ~TSENSOR_SENSOR0_CONFIG2_TH3;
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG2_TH3, crit_trip);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG2);
+
+ /*
+ * Enable sensor, emergency shutdown, interrupts for level 1/2/3
+ * breaches and counter overflow condition.
+ *
+ * Disable DIV2 throttle for now since we need to figure out how
+ * to integrate it properly with the thermal framework.
+ *
+ * Thermal levels supported by hardware:
+ *
+ * Level 0 = cold
+ * Level 1 = passive cooling (cpufreq DVFS)
+ * Level 2 = passive cooling assisted by hardware (DIV2)
+ * Level 3 = emergency shutdown assisted by hardware (PMC)
+ */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+ val &= ~TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP;
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_DVFS_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_HW_FREQ_DIV_EN, 0);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_THERMAL_RST_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_OVERFLOW_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_HW_FREQ_DIV_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_THERMAL_RST_EN, 1);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+
+ mutex_unlock(&tzd->lock);
+
+ err = thermal_zone_device_enable(tzd);
+ if (err) {
+ dev_err(ts->dev, "ch%u: failed to enable zone: %d\n", id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static bool tegra_tsensor_fuse_read_spare(unsigned int spare)
+{
+ u32 val = 0;
+
+ tegra_fuse_readl(TEGRA30_FUSE_SPARE_BIT + spare * 4, &val);
+
+ return !!val;
+}
+
+static int tegra_tsensor_nvmem_setup(struct tegra_tsensor *ts)
+{
+ u32 i, ate_ver = 0, cal = 0, t1_25C = 0, t2_90C = 0;
+ int err, c1_25C, c2_90C;
+
+ err = tegra_fuse_readl(TEGRA30_FUSE_TEST_PROG_VER, &ate_ver);
+ if (err) {
+ dev_err_probe(ts->dev, err, "failed to get ATE version\n");
+ return err;
+ }
+
+ if (ate_ver < 8) {
+ dev_info(ts->dev, "unsupported ATE version: %u\n", ate_ver);
+ return -ENODEV;
+ }
+
+ /*
+ * We have two TSENSOR channels in two different spots on the SoC.
+ * The second channel provides more accurate data on older SoC
+ * versions, so use it as the primary channel.
+ */
+ if (ate_ver <= 21) {
+ dev_info_once(ts->dev,
+ "older ATE version detected, channels remapped\n");
+ ts->swap_channels = true;
+ }
+
+ err = tegra_fuse_readl(TEGRA30_FUSE_TSENSOR_CALIB, &cal);
+ if (err) {
+ dev_err(ts->dev, "failed to get calibration data: %d\n", err);
+ return err;
+ }
+
+ /* get calibrated counter values for 25C/90C thresholds */
+ c1_25C = FIELD_GET(TEGRA30_FUSE_TSENSOR_CALIB_LOW, cal);
+ c2_90C = FIELD_GET(TEGRA30_FUSE_TSENSOR_CALIB_HIGH, cal);
+
+ /* and calibrated temperatures corresponding to the counter values */
+ for (i = 0; i < 7; i++) {
+ t1_25C |= tegra_tsensor_fuse_read_spare(14 + i) << i;
+ t1_25C |= tegra_tsensor_fuse_read_spare(21 + i) << i;
+
+ t2_90C |= tegra_tsensor_fuse_read_spare(0 + i) << i;
+ t2_90C |= tegra_tsensor_fuse_read_spare(7 + i) << i;
+ }
+
+ if (c2_90C - c1_25C <= t2_90C - t1_25C) {
+ dev_err(ts->dev, "invalid calibration data: %d %d %u %u\n",
+ c2_90C, c1_25C, t2_90C, t1_25C);
+ return -EINVAL;
+ }
+
+ /* all calibration coefficients are premultiplied by 1000000 */
+
+ ts->calib.a = DIV_ROUND_CLOSEST((t2_90C - t1_25C) * 1000000,
+ (c2_90C - c1_25C));
+
+ ts->calib.b = t1_25C * 1000000 - ts->calib.a * c1_25C;
+
+ if (tegra_sku_info.revision == TEGRA_REVISION_A01) {
+ ts->calib.m = -2775;
+ ts->calib.n = 1338811;
+ ts->calib.p = -7300000;
+ } else {
+ ts->calib.m = -3512;
+ ts->calib.n = 1528943;
+ ts->calib.p = -11100000;
+ }
+
+ /* precompute half the linear coefficient of the reduced quadratic equation */
+ ts->calib.r = DIV_ROUND_CLOSEST(ts->calib.n, ts->calib.m * 2);
+
+ dev_info_once(ts->dev,
+ "calibration: %d %d %u %u ATE ver: %u SoC rev: %u\n",
+ c2_90C, c1_25C, t2_90C, t1_25C, ate_ver,
+ tegra_sku_info.revision);
+
+ return 0;
+}
+
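A minimal sketch of the linear-fit construction performed above, with invented fuse values (the real c1_25C/c2_90C counters and the 25C/90C temperatures are decoded from the calibration fuses earlier in this function):

#include <stdio.h>

int main(void)
{
	int c1_25C = 2200, c2_90C = 5100;	/* invented counter readings */
	int t1 = 25, t2 = 90;			/* calibration temperatures, C */

	/* same premultiplied-by-1000000 fit as the driver builds */
	int a = ((t2 - t1) * 1000000 + (c2_90C - c1_25C) / 2) / (c2_90C - c1_25C);
	int b = t1 * 1000000 - a * c1_25C;

	printf("a=%d b=%d\n", a, b);	/* Celsius ~= (a*counter + b) / 1e6 */
	return 0;
}
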
+static int tegra_tsensor_register_channel(struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ unsigned int hw_id = ts->swap_channels ? !id : id;
+
+ tsc->ts = ts;
+ tsc->id = id;
+ tsc->regs = ts->regs + 0x40 * (hw_id + 1);
+
+ tsc->tzd = devm_thermal_zone_of_sensor_register(ts->dev, id, tsc, &ops);
+ if (IS_ERR(tsc->tzd)) {
+ if (PTR_ERR(tsc->tzd) != -ENODEV)
+ return dev_err_probe(ts->dev, PTR_ERR(tsc->tzd),
+ "failed to register thermal zone\n");
+
+ /*
+ * It's okay if sensor isn't assigned to any thermal zone
+ * in a device-tree.
+ */
+ tsc->tzd = NULL;
+ return 0;
+ }
+
+ if (devm_thermal_add_hwmon_sysfs(tsc->tzd))
+ dev_warn(ts->dev, "failed to add hwmon sysfs attributes\n");
+
+ return 0;
+}
+
+static int tegra_tsensor_probe(struct platform_device *pdev)
+{
+ struct tegra_tsensor *ts;
+ unsigned int i;
+ int err, irq;
+
+ ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ts->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ts);
+
+ ts->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ts->regs))
+ return PTR_ERR(ts->regs);
+
+ ts->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ts->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts->clk),
+ "failed to get clock\n");
+
+ ts->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(ts->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts->rst),
+ "failed to get reset control\n");
+
+ err = tegra_tsensor_nvmem_setup(ts);
+ if (err)
+ return err;
+
+ err = tegra_tsensor_hw_enable(ts);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(&pdev->dev,
+ devm_tegra_tsensor_hw_disable,
+ ts);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_register_channel(ts, i);
+ if (err)
+ return err;
+ }
+
+ err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ tegra_tsensor_isr, IRQF_ONESHOT,
+ "tegra_tsensor", ts);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to request interrupt\n");
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_enable_hw_channel(ts, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_tsensor_suspend(struct device *dev)
+{
+ struct tegra_tsensor *ts = dev_get_drvdata(dev);
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_disable_hw_channel(ts, i);
+ if (err)
+ goto enable_channel;
+ }
+
+ err = tegra_tsensor_hw_disable(ts);
+ if (err)
+ goto enable_channel;
+
+ return 0;
+
+enable_channel:
+ while (i--)
+ tegra_tsensor_enable_hw_channel(ts, i);
+
+ return err;
+}
+
+static int __maybe_unused tegra_tsensor_resume(struct device *dev)
+{
+ struct tegra_tsensor *ts = dev_get_drvdata(dev);
+ unsigned int i;
+ int err;
+
+ err = tegra_tsensor_hw_enable(ts);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_enable_hw_channel(ts, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_tsensor_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_tsensor_suspend,
+ tegra_tsensor_resume)
+};
+
+static const struct of_device_id tegra_tsensor_of_match[] = {
+ { .compatible = "nvidia,tegra30-tsensor", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_tsensor_of_match);
+
+static struct platform_driver tegra_tsensor_driver = {
+ .probe = tegra_tsensor_probe,
+ .driver = {
+ .name = "tegra30-tsensor",
+ .of_match_table = tegra_tsensor_of_match,
+ .pm = &tegra_tsensor_pm_ops,
+ },
+};
+module_platform_driver(tegra_tsensor_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra30 Thermal Sensor driver");
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index a503c1b2bfd9..3d91982d8371 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -33,6 +33,16 @@ config VDPA_SIM_BLOCK
vDPA block device simulator which terminates IO request in a
memory buffer.
+config VDPA_USER
+ tristate "VDUSE (vDPA Device in Userspace) support"
+ depends on EVENTFD && MMU && HAS_DMA
+ select DMA_OPS
+ select VHOST_IOTLB
+ select IOMMU_IOVA
+ help
+ With VDUSE it is possible to emulate a vDPA Device
+ in a userspace program.
+
config IFCVF
tristate "Intel IFC VF vDPA driver"
depends on PCI_MSI
@@ -53,6 +63,7 @@ config MLX5_VDPA
config MLX5_VDPA_NET
tristate "vDPA driver for ConnectX devices"
select MLX5_VDPA
+ select VHOST_RING
depends on MLX5_CORE
help
VDPA network driver for ConnectX6 and newer. Provides offloading
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index 67fe7f3d6943..f02ebed33f19 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VDPA) += vdpa.o
obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
+obj-$(CONFIG_VDPA_USER) += vdpa_user/
obj-$(CONFIG_IFCVF) += ifcvf/
obj-$(CONFIG_MLX5_VDPA) += mlx5/
obj-$(CONFIG_VP_VDPA) += virtio_pci/
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 6e197fe0fcf9..2808f1ba9f7b 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -158,7 +158,9 @@ next:
return -EIO;
}
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
+
+ for (i = 0; i < hw->nr_vring; i++) {
ifc_iowrite16(i, &hw->common_cfg->queue_select);
notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
hw->vring[i].notify_addr = hw->notify_base +
@@ -304,7 +306,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ q_pair_id = qid / hw->nr_vring;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
last_avail_idx = ifc_ioread16(avail_idx_addr);
@@ -318,7 +320,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ q_pair_id = qid / hw->nr_vring;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
hw->vring[qid].last_avail_idx = num;
ifc_iowrite16(num, avail_idx_addr);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 2996db0da490..09918af3ecf8 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -22,17 +22,8 @@
#define N3000_DEVICE_ID 0x1041
#define N3000_SUBSYS_DEVICE_ID 0x001A
-#define IFCVF_NET_SUPPORTED_FEATURES \
- ((1ULL << VIRTIO_NET_F_MAC) | \
- (1ULL << VIRTIO_F_ANY_LAYOUT) | \
- (1ULL << VIRTIO_F_VERSION_1) | \
- (1ULL << VIRTIO_NET_F_STATUS) | \
- (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
- (1ULL << VIRTIO_F_ACCESS_PLATFORM) | \
- (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-
-/* Only one queue pair for now. */
-#define IFCVF_MAX_QUEUE_PAIRS 1
+/* Max 8 data queue pairs (16 queues) and one control vq for now. */
+#define IFCVF_MAX_QUEUES 17
#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
#define IFCVF_QUEUE_MAX 32768
@@ -51,8 +42,6 @@
#define ifcvf_private_to_vf(adapter) \
(&((struct ifcvf_adapter *)adapter)->vf)
-#define IFCVF_MAX_INTR (IFCVF_MAX_QUEUE_PAIRS * 2 + 1)
-
struct vring_info {
u64 desc;
u64 avail;
@@ -83,7 +72,7 @@ struct ifcvf_hw {
u32 dev_type;
struct virtio_pci_common_cfg __iomem *common_cfg;
void __iomem *net_cfg;
- struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
+ struct vring_info vring[IFCVF_MAX_QUEUES];
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
@@ -103,7 +92,13 @@ struct ifcvf_vring_lm_cfg {
struct ifcvf_lm_cfg {
u8 reserved[IFCVF_LM_RING_STATE_OFFSET];
- struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUE_PAIRS];
+ struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUES];
+};
+
+struct ifcvf_vdpa_mgmt_dev {
+ struct vdpa_mgmt_dev mdev;
+ struct ifcvf_adapter *adapter;
+ struct pci_dev *pdev;
};
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 351c6cfb24c3..dcd648e1f7e7 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -63,9 +63,13 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct ifcvf_hw *vf = &adapter->vf;
int vector, i, ret, irq;
+ u16 max_intr;
- ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
- IFCVF_MAX_INTR, PCI_IRQ_MSIX);
+ /* all queues and config interrupt */
+ max_intr = vf->nr_vring + 1;
+
+ ret = pci_alloc_irq_vectors(pdev, max_intr,
+ max_intr, PCI_IRQ_MSIX);
if (ret < 0) {
IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
return ret;
@@ -83,7 +87,7 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
return ret;
}
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ for (i = 0; i < vf->nr_vring; i++) {
snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
pci_name(pdev), i);
vector = i + IFCVF_MSI_QUEUE_OFF;
@@ -112,7 +116,6 @@ static int ifcvf_start_datapath(void *private)
u8 status;
int ret;
- vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
ret = ifcvf_start_hw(vf);
if (ret < 0) {
status = ifcvf_get_status(vf);
@@ -128,7 +131,7 @@ static int ifcvf_stop_datapath(void *private)
struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
int i;
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ for (i = 0; i < vf->nr_vring; i++)
vf->vring[i].cb.callback = NULL;
ifcvf_stop_hw(vf);
@@ -141,7 +144,7 @@ static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
int i;
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ for (i = 0; i < vf->nr_vring; i++) {
vf->vring[i].last_avail_idx = 0;
vf->vring[i].desc = 0;
vf->vring[i].avail = 0;
@@ -171,17 +174,12 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
struct pci_dev *pdev = adapter->pdev;
-
+ u32 type = vf->dev_type;
u64 features;
- switch (vf->dev_type) {
- case VIRTIO_ID_NET:
- features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
- break;
- case VIRTIO_ID_BLOCK:
+ if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
features = ifcvf_get_features(vf);
- break;
- default:
+ else {
features = 0;
IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
}
@@ -218,23 +216,12 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
int ret;
vf = vdpa_to_vf(vdpa_dev);
- adapter = dev_get_drvdata(vdpa_dev->dev.parent);
+ adapter = vdpa_to_adapter(vdpa_dev);
status_old = ifcvf_get_status(vf);
if (status_old == status)
return;
- if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
- !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
- ifcvf_stop_datapath(adapter);
- ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
- }
-
- if (status == 0) {
- ifcvf_reset_vring(adapter);
- return;
- }
-
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
!(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
ret = ifcvf_request_irq(adapter);
@@ -254,6 +241,29 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
ifcvf_set_status(vf, status);
}
+static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
+ u8 status_old;
+
+ vf = vdpa_to_vf(vdpa_dev);
+ adapter = vdpa_to_adapter(vdpa_dev);
+ status_old = ifcvf_get_status(vf);
+
+ if (status_old == 0)
+ return 0;
+
+ if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
+ ifcvf_stop_datapath(adapter);
+ ifcvf_free_irq(adapter, vf->nr_vring);
+ }
+
+ ifcvf_reset_vring(adapter);
+
+ return 0;
+}
+
static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
return IFCVF_QUEUE_MAX;
@@ -437,6 +447,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.set_features = ifcvf_vdpa_set_features,
.get_status = ifcvf_vdpa_get_status,
.set_status = ifcvf_vdpa_set_status,
+ .reset = ifcvf_vdpa_reset,
.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
.get_vq_state = ifcvf_vdpa_get_vq_state,
.set_vq_state = ifcvf_vdpa_set_vq_state,
@@ -458,63 +469,63 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_vq_notification = ifcvf_get_vq_notification,
};
-static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct virtio_device_id id_table_net[] = {
+ {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
+ {0},
+};
+
+static struct virtio_device_id id_table_blk[] = {
+ {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
+ {0},
+};
+
+static u32 get_dev_type(struct pci_dev *pdev)
{
- struct device *dev = &pdev->dev;
- struct ifcvf_adapter *adapter;
- struct ifcvf_hw *vf;
- int ret, i;
+ u32 dev_type;
- ret = pcim_enable_device(pdev);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to enable device\n");
- return ret;
- }
+ /* This driver drives both modern virtio devices and transitional
+ * devices in modern mode.
+ * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so legacy
+ * devices and transitional devices in legacy mode will not work for
+ * vDPA; this driver will not drive devices with a legacy interface.
+ */
- ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
- IFCVF_DRIVER_NAME);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to request MMIO region\n");
- return ret;
- }
+ if (pdev->device < 0x1040)
+ dev_type = pdev->subsystem_device;
+ else
+ dev_type = pdev->device - 0x1040;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (ret) {
- IFCVF_ERR(pdev, "No usable DMA configuration\n");
- return ret;
- }
+ return dev_type;
+}
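A standalone illustration of the decode above. The 0x1040 split follows the virtio spec's PCI ID convention: transitional devices (IDs 0x1000..0x103f) carry the virtio device type in the PCI subsystem device ID, while modern devices encode it as (PCI device ID - 0x1040):

#include <stdio.h>

static unsigned int decode_virtio_dev_type(unsigned int pci_device,
					   unsigned int subsys_device)
{
	return pci_device < 0x1040 ? subsys_device : pci_device - 0x1040;
}

int main(void)
{
	printf("%u\n", decode_virtio_dev_type(0x1041, 0));	/* modern net: 1 */
	printf("%u\n", decode_virtio_dev_type(0x1000, 1));	/* transitional net: 1 */
	return 0;
}
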
- ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
- if (ret) {
- IFCVF_ERR(pdev,
- "Failed for adding devres for freeing irq vectors\n");
- return ret;
- }
+static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ struct ifcvf_adapter *adapter;
+ struct pci_dev *pdev;
+ struct ifcvf_hw *vf;
+ struct device *dev;
+ int ret, i;
+ ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+ if (ifcvf_mgmt_dev->adapter)
+ return -EOPNOTSUPP;
+
+ pdev = ifcvf_mgmt_dev->pdev;
+ dev = &pdev->dev;
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, NULL);
+ dev, &ifc_vdpa_ops, name, false);
if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return PTR_ERR(adapter);
}
- pci_set_master(pdev);
- pci_set_drvdata(pdev, adapter);
+ ifcvf_mgmt_dev->adapter = adapter;
+ pci_set_drvdata(pdev, ifcvf_mgmt_dev);
vf = &adapter->vf;
-
- /* This drirver drives both modern virtio devices and transitional
- * devices in modern mode.
- * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
- * so legacy devices and transitional devices in legacy
- * mode will not work for vDPA, this driver will not
- * drive devices with legacy interface.
- */
- if (pdev->device < 0x1040)
- vf->dev_type = pdev->subsystem_device;
- else
- vf->dev_type = pdev->device - 0x1040;
-
+ vf->dev_type = get_dev_type(pdev);
vf->base = pcim_iomap_table(pdev);
adapter->pdev = pdev;
@@ -526,14 +537,15 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ for (i = 0; i < vf->nr_vring; i++)
vf->vring[i].irq = -EINVAL;
vf->hw_features = ifcvf_get_hw_features(vf);
- ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
+ adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
+ ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
if (ret) {
- IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
+ IFCVF_ERR(pdev, "Failed to register to vDPA bus");
goto err;
}
@@ -544,11 +556,100 @@ err:
return ret;
}
+static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+
+ ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+ _vdpa_unregister_device(dev);
+ ifcvf_mgmt_dev->adapter = NULL;
+}
+
+static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
+ .dev_add = ifcvf_vdpa_dev_add,
+ .dev_del = ifcvf_vdpa_dev_del
+};
+
+static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ struct device *dev = &pdev->dev;
+ u32 dev_type;
+ int ret;
+
+ ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+ if (!ifcvf_mgmt_dev) {
+ IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+ return -ENOMEM;
+ }
+
+ dev_type = get_dev_type(pdev);
+ switch (dev_type) {
+ case VIRTIO_ID_NET:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_net;
+ break;
+ case VIRTIO_ID_BLOCK:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
+ break;
+ default:
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+ ifcvf_mgmt_dev->mdev.device = dev;
+ ifcvf_mgmt_dev->pdev = pdev;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to enable device\n");
+ goto err;
+ }
+
+ ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
+ IFCVF_DRIVER_NAME);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request MMIO region\n");
+ goto err;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ IFCVF_ERR(pdev, "No usable DMA configuration\n");
+ goto err;
+ }
+
+ ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed to add devres for freeing irq vectors\n");
+ goto err;
+ }
+
+ pci_set_master(pdev);
+
+ ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed to initialize the management interfaces\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kfree(ifcvf_mgmt_dev);
+ return ret;
+}
+
static void ifcvf_remove(struct pci_dev *pdev)
{
- struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
- vdpa_unregister_device(&adapter->vdpa);
+ ifcvf_mgmt_dev = pci_get_drvdata(pdev);
+ vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
+ kfree(ifcvf_mgmt_dev);
}
static struct pci_device_id ifcvf_pci_ids[] = {
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 0002b2136b48..01a848adf590 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -5,7 +5,7 @@
#define __MLX5_VDPA_H__
#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
+#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/mlx5/driver.h>
@@ -48,6 +48,26 @@ struct mlx5_vdpa_resources {
bool valid;
};
+struct mlx5_control_vq {
+ struct vhost_iotlb *iotlb;
+ /* spinlock to synchronize iommu table */
+ spinlock_t iommu_lock;
+ struct vringh vring;
+ bool ready;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ struct vdpa_callback event_cb;
+ struct vringh_kiov riov;
+ struct vringh_kiov wiov;
+ unsigned short head;
+};
+
+struct mlx5_ctrl_wq_ent {
+ struct work_struct work;
+ struct mlx5_vdpa_dev *mvdev;
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
@@ -57,9 +77,12 @@ struct mlx5_vdpa_dev {
u64 actual_features;
u8 status;
u32 max_vqs;
+ u16 max_idx;
u32 generation;
struct mlx5_vdpa_mr mr;
+ struct mlx5_control_vq cvq;
+ struct workqueue_struct *wq;
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
@@ -68,6 +91,7 @@ int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
+int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn);
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn);
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn);
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn);
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index e59135fa867e..ff010c6d0cd3 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+#include <linux/vhost_types.h>
#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
@@ -451,33 +452,30 @@ static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
}
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct vhost_iotlb_map *map;
+ u64 start = 0, last = ULLONG_MAX;
int err;
- if (mr->initialized)
- return 0;
-
- if (iotlb)
- err = create_user_mr(mvdev, iotlb);
- else
- err = create_dma_mr(mvdev, mr);
-
- if (!err)
- mr->initialized = true;
+ if (!src) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last, start, VHOST_ACCESS_RW);
+ return err;
+ }
- return err;
+ for (map = vhost_iotlb_itree_first(src, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, map->last,
+ map->addr, map->perm);
+ if (err)
+ return err;
+ }
+ return 0;
}
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static void prune_iotlb(struct mlx5_vdpa_dev *mvdev)
{
- int err;
-
- mutex_lock(&mvdev->mr.mkey_mtx);
- err = _mlx5_vdpa_create_mr(mvdev, iotlb);
- mutex_unlock(&mvdev->mr.mkey_mtx);
- return err;
+ vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);
}
static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
@@ -501,6 +499,7 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
if (!mr->initialized)
goto out;
+ prune_iotlb(mvdev);
if (mr->user_mr)
destroy_user_mr(mvdev, mr);
else
@@ -512,6 +511,48 @@ out:
mutex_unlock(&mr->mkey_mtx);
}
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err;
+
+ if (mr->initialized)
+ return 0;
+
+ if (iotlb)
+ err = create_user_mr(mvdev, iotlb);
+ else
+ err = create_dma_mr(mvdev, mr);
+
+ if (err)
+ return err;
+
+ err = dup_iotlb(mvdev, iotlb);
+ if (err)
+ goto out_err;
+
+ mr->initialized = true;
+ return 0;
+
+out_err:
+ if (iotlb)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+
+ return err;
+}
+
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ int err;
+
+ mutex_lock(&mvdev->mr.mkey_mtx);
+ err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+ mutex_unlock(&mvdev->mr.mkey_mtx);
+ return err;
+}
+
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map)
{
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index d4606213f88a..15e266d0e27a 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+#include <linux/iova.h>
#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"
@@ -128,6 +129,16 @@ int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *
return err;
}
+int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
+
+ MLX5_SET(modify_rqt_in, in, uid, mvdev->res.uid);
+ MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+ return mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
+}
+
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
@@ -221,6 +232,22 @@ int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *m
return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}
+static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
+{
+ mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
+ if (!mvdev->cvq.iotlb)
+ return -ENOMEM;
+
+ vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);
+
+ return 0;
+}
+
+static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
+{
+ vhost_iotlb_free(mvdev->cvq.iotlb);
+}
+
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
@@ -260,10 +287,17 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
err = -ENOMEM;
goto err_key;
}
+
+ err = init_ctrl_vq(mvdev);
+ if (err)
+ goto err_ctrl;
+
res->valid = true;
return 0;
+err_ctrl:
+ iounmap(res->kick_addr);
err_key:
dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
@@ -282,6 +316,7 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
if (!res->valid)
return;
+ cleanup_ctrl_vq(mvdev);
iounmap(res->kick_addr);
res->kick_addr = NULL;
dealloc_pd(mvdev, res->pdn, res->uid);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 5906cada2293..294ba05e6fc9 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -45,6 +45,8 @@ MODULE_LICENSE("Dual BSD/GPL");
(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
+#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
+
struct mlx5_vdpa_net_resources {
u32 tisn;
u32 tdn;
@@ -90,7 +92,6 @@ struct mlx5_vq_restore_info {
u16 avail_index;
u16 used_index;
bool ready;
- struct vdpa_callback cb;
bool restore;
};
@@ -100,7 +101,6 @@ struct mlx5_vdpa_virtqueue {
u64 device_addr;
u64 driver_addr;
u32 num_ent;
- struct vdpa_callback event_cb;
/* Resources for implementing the notification channel from the device
* to the driver. fwqp is the firmware end of an RC connection; the
@@ -135,11 +135,20 @@ struct mlx5_vdpa_virtqueue {
*/
#define MLX5_MAX_SUPPORTED_VQS 16
+static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
+{
+ if (unlikely(idx > mvdev->max_idx))
+ return false;
+
+ return true;
+}
+
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;
struct mlx5_vdpa_net_resources res;
struct virtio_net_config config;
struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
+ struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
/* Serialize vq resources creation and destruction. This is required
* since memory map might change and we need to destroy and create
@@ -151,15 +160,18 @@ struct mlx5_vdpa_net {
struct mlx5_flow_handle *rx_rule;
bool setup;
u16 mtu;
+ u32 cur_num_vqs;
};
static void free_resources(struct mlx5_vdpa_net *ndev);
static void init_mvqs(struct mlx5_vdpa_net *ndev);
-static int setup_driver(struct mlx5_vdpa_net *ndev);
+static int setup_driver(struct mlx5_vdpa_dev *mvdev);
static void teardown_driver(struct mlx5_vdpa_net *ndev);
static bool mlx5_vdpa_debug;
+#define MLX5_CVQ_MAX_ENT 16
+
#define MLX5_LOG_VIO_FLAG(_feature) \
do { \
if (features & BIT_ULL(_feature)) \
@@ -172,11 +184,41 @@ static bool mlx5_vdpa_debug;
mlx5_vdpa_info(mvdev, "%s\n", #_status); \
} while (0)
+/* TODO: cross-endian support */
+static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
+{
+ return virtio_legacy_is_little_endian() ||
+ (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
+}
+
+static u16 mlx5vdpa16_to_cpu(struct mlx5_vdpa_dev *mvdev, __virtio16 val)
+{
+ return __virtio16_to_cpu(mlx5_vdpa_is_little_endian(mvdev), val);
+}
+
+static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+{
+ return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+}
+
static inline u32 mlx5_vdpa_max_qps(int max_vqs)
{
return max_vqs / 2;
}
+static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
+{
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+ return 2;
+
+ return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs);
+}
+
+static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
+{
+ return idx == ctrl_vq_idx(mvdev);
+}
+
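A small sketch of the virtqueue layout this implies; the rx/tx-per-pair ordering is an assumption spelled out for orientation, with the control VQ always taking the index right after the last data VQ:

#include <stdio.h>
#include <stdbool.h>

static unsigned int ctrl_vq_index(bool mq_negotiated, unsigned int max_pairs)
{
	/* data VQs are (rx0, tx0, rx1, tx1, ...); without MQ only one
	 * pair is used, so the control VQ sits at index 2 */
	return mq_negotiated ? 2 * max_pairs : 2;
}

int main(void)
{
	printf("no MQ: cvq at index %u\n", ctrl_vq_index(false, 8));	/* 2 */
	printf("MQ:    cvq at index %u\n", ctrl_vq_index(true, 8));	/* 16 */
	return 0;
}
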
static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
{
if (status & ~VALID_STATUS_MASK)
@@ -481,6 +523,10 @@ static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
{
+ struct mlx5_vdpa_net *ndev = mvq->ndev;
+ struct vdpa_callback *event_cb;
+
+ event_cb = &ndev->event_cbs[mvq->index];
mlx5_cq_set_ci(&mvq->cq.mcq);
/* make sure CQ cosumer update is visible to the hardware before updating
@@ -488,8 +534,8 @@ static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int nu
*/
dma_wmb();
rx_post(&mvq->vqqp, num);
- if (mvq->event_cb.callback)
- mvq->event_cb.callback(mvq->event_cb.private);
+ if (event_cb->callback)
+ event_cb->callback(event_cb->private);
}
static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
@@ -1100,10 +1146,8 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
if (!mvq->num_ent)
return 0;
- if (mvq->initialized) {
- mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
- return -EINVAL;
- }
+ if (mvq->initialized)
+ return 0;
err = cq_create(ndev, idx, mvq->num_ent);
if (err)
@@ -1190,19 +1234,20 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
- int log_max_rqt;
__be32 *list;
+ int max_rqt;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
- log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
- if (log_max_rqt < 1)
+ max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2,
+ 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+ if (max_rqt < 1)
return -EOPNOTSUPP;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1211,10 +1256,9 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
- MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
+ for (i = 0, j = 0; j < max_rqt; j++) {
if (!ndev->vqs[j].initialized)
continue;
@@ -1223,6 +1267,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
i++;
}
}
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
@@ -1232,6 +1277,52 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
return 0;
}
+#define MLX5_MODIFY_RQT_NUM_RQS ((u64)1)
+
+static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
+{
+ __be32 *list;
+ int max_rqt;
+ void *rqtc;
+ int inlen;
+ void *in;
+ int i, j;
+ int err;
+
+ max_rqt = min_t(int, ndev->cur_num_vqs / 2,
+ 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+ if (max_rqt < 1)
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid);
+ MLX5_SET64(modify_rqt_in, in, bitmask, MLX5_MODIFY_RQT_NUM_RQS);
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
+ MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+
+ list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
+ for (i = 0, j = 0; j < num; j++) {
+ if (!ndev->vqs[j].initialized)
+ continue;
+
+ if (!vq_is_tx(ndev->vqs[j].index)) {
+ list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
+ i++;
+ }
+ }
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+ err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
+ kfree(in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static void destroy_rqt(struct mlx5_vdpa_net *ndev)
{
mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
@@ -1345,12 +1436,206 @@ static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
ndev->rx_rule = NULL;
}
+static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct mlx5_core_dev *pfmdev;
+ size_t read;
+ u8 mac[ETH_ALEN];
+
+ pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_MAC_ADDR_SET:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN);
+ if (read != ETH_ALEN)
+ break;
+
+ if (!memcmp(ndev->config.mac, mac, 6)) {
+ status = VIRTIO_NET_OK;
+ break;
+ }
+
+ if (!is_zero_ether_addr(ndev->config.mac)) {
+ if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
+ mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n",
+ ndev->config.mac);
+ break;
+ }
+ }
+
+ if (mlx5_mpfs_add_mac(pfmdev, mac)) {
+ mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n",
+ mac);
+ break;
+ }
+
+ memcpy(ndev->config.mac, mac, ETH_ALEN);
+ status = VIRTIO_NET_OK;
+ break;
+
+ default:
+ break;
+ }
+
+ return status;
+}
+
+static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int cur_qps = ndev->cur_num_vqs / 2;
+ int err;
+ int i;
+
+ if (cur_qps > newqps) {
+ err = modify_rqt(ndev, 2 * newqps);
+ if (err)
+ return err;
+
+ for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--)
+ teardown_vq(ndev, &ndev->vqs[i]);
+
+ ndev->cur_num_vqs = 2 * newqps;
+ } else {
+ ndev->cur_num_vqs = 2 * newqps;
+ for (i = cur_qps * 2; i < 2 * newqps; i++) {
+ err = setup_vq(ndev, &ndev->vqs[i]);
+ if (err)
+ goto clean_added;
+ }
+ err = modify_rqt(ndev, 2 * newqps);
+ if (err)
+ goto clean_added;
+ }
+ return 0;
+
+clean_added:
+ for (--i; i >= cur_qps; --i)
+ teardown_vq(ndev, &ndev->vqs[i]);
+
+ return err;
+}
+
+static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ struct virtio_net_ctrl_mq mq;
+ size_t read;
+ u16 newqps;
+
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq));
+ if (read != sizeof(mq))
+ break;
+
+ newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
+ if (ndev->cur_num_vqs == 2 * newqps) {
+ status = VIRTIO_NET_OK;
+ break;
+ }
+
+ if (newqps & (newqps - 1))
+ break;
+
+ if (!change_num_qps(mvdev, newqps))
+ status = VIRTIO_NET_OK;
+
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
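A standalone sketch of the acceptance rule implied by the newqps & (newqps - 1) test above; that the restriction exists so the RQT can spread receive traffic evenly is an assumption, not something this patch states:

#include <stdio.h>
#include <stdbool.h>

static bool is_pow2(unsigned int n)
{
	return n && !(n & (n - 1));
}

int main(void)
{
	unsigned int reqs[] = { 1, 2, 3, 4, 6, 8 };

	for (unsigned int i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
		printf("%u queue pairs -> %s\n", reqs[i],
		       is_pow2(reqs[i]) ? "accepted" : "rejected");
	return 0;
}
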
+static void mlx5_cvq_kick_handler(struct work_struct *work)
+{
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct virtio_net_ctrl_hdr ctrl;
+ struct mlx5_ctrl_wq_ent *wqent;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_control_vq *cvq;
+ struct mlx5_vdpa_net *ndev;
+ size_t read, write;
+ int err;
+
+ wqent = container_of(work, struct mlx5_ctrl_wq_ent, work);
+ mvdev = wqent->mvdev;
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ cvq = &mvdev->cvq;
+ if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ goto out;
+
+ if (!cvq->ready)
+ goto out;
+
+ while (true) {
+ err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
+ GFP_ATOMIC);
+ if (err <= 0)
+ break;
+
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &ctrl, sizeof(ctrl));
+ if (read != sizeof(ctrl))
+ break;
+
+ switch (ctrl.class) {
+ case VIRTIO_NET_CTRL_MAC:
+ status = handle_ctrl_mac(mvdev, ctrl.cmd);
+ break;
+ case VIRTIO_NET_CTRL_MQ:
+ status = handle_ctrl_mq(mvdev, ctrl.cmd);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Make sure data is written before advancing index */
+ smp_wmb();
+
+ write = vringh_iov_push_iotlb(&cvq->vring, &cvq->wiov, &status, sizeof(status));
+ vringh_complete_iotlb(&cvq->vring, cvq->head, write);
+ vringh_kiov_cleanup(&cvq->riov);
+ vringh_kiov_cleanup(&cvq->wiov);
+
+ if (vringh_need_notify_iotlb(&cvq->vring))
+ vringh_notify(&cvq->vring);
+ }
+out:
+ kfree(wqent);
+}
+
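For orientation, a hedged sketch of the message shape the loop above consumes; the struct name is illustrative, the kernel's own definitions live in the virtio-net UAPI headers:

#include <stdint.h>
#include <stdio.h>

struct vnet_ctrl_hdr {
	uint8_t class;	/* VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MQ, ... */
	uint8_t cmd;	/* command within the class */
};

int main(void)
{
	/* the handler pulls this 2-byte header, then a class/cmd specific
	 * payload (a 6-byte MAC, a 16-bit queue-pair count, ...), and
	 * finally pushes back a one-byte ack: 0 = VIRTIO_NET_OK,
	 * 1 = VIRTIO_NET_ERR */
	printf("ctrl hdr is %zu bytes\n", sizeof(struct vnet_ctrl_hdr));
	return 0;
}
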
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_ctrl_wq_ent *wqent;
+
+ if (!is_index_valid(mvdev, idx))
+ return;
+
+ if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
+ if (!mvdev->cvq.ready)
+ return;
+
+ wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
+ if (!wqent)
+ return;
+ wqent->mvdev = mvdev;
+ INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
+ queue_work(mvdev->wq, &wqent->work);
+ return;
+ }
+
+ mvq = &ndev->vqs[idx];
if (unlikely(!mvq->ready))
return;
@@ -1362,8 +1647,19 @@ static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ mvdev->cvq.desc_addr = desc_area;
+ mvdev->cvq.device_addr = device_area;
+ mvdev->cvq.driver_addr = driver_area;
+ return 0;
+ }
+
+ mvq = &ndev->vqs[idx];
mvq->desc_addr = desc_area;
mvq->device_addr = device_area;
mvq->driver_addr = driver_area;
@@ -1376,6 +1672,9 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
+ if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
+ return;
+
mvq = &ndev->vqs[idx];
mvq->num_ent = num;
}
@@ -1384,17 +1683,46 @@ static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_c
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];
- vq->event_cb = *cb;
+ ndev->event_cbs[idx] = *cb;
+}
+
+static void mlx5_cvq_notify(struct vringh *vring)
+{
+ struct mlx5_control_vq *cvq = container_of(vring, struct mlx5_control_vq, vring);
+
+ if (!cvq->event_cb.callback)
+ return;
+
+ cvq->event_cb.callback(cvq->event_cb.private);
+}
+
+static void set_cvq_ready(struct mlx5_vdpa_dev *mvdev, bool ready)
+{
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+
+ cvq->ready = ready;
+ if (!ready)
+ return;
+
+ cvq->vring.notify = mlx5_cvq_notify;
}
static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+
+ if (!is_index_valid(mvdev, idx))
+ return;
+
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ set_cvq_ready(mvdev, ready);
+ return;
+ }
+ mvq = &ndev->vqs[idx];
if (!ready)
suspend_vq(ndev, mvq);
@@ -1405,9 +1733,14 @@ static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
- return mvq->ready;
+ if (!is_index_valid(mvdev, idx))
+ return false;
+
+ if (is_ctrl_vq_idx(mvdev, idx))
+ return mvdev->cvq.ready;
+
+ return ndev->vqs[idx].ready;
}
static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
@@ -1415,8 +1748,17 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ mvdev->cvq.vring.last_avail_idx = state->split.avail_index;
+ return 0;
+ }
+
+ mvq = &ndev->vqs[idx];
if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
mlx5_vdpa_warn(mvdev, "can't modify available index\n");
return -EINVAL;
@@ -1431,10 +1773,19 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
struct mlx5_virtq_attr attr;
int err;
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ state->split.avail_index = mvdev->cvq.vring.last_avail_idx;
+ return 0;
+ }
+
+ mvq = &ndev->vqs[idx];
/* If the virtq object was destroyed, use the value saved at
* the last minute of suspend_vq. This caters for userspace
* that cares about emulating the index after vq is stopped.
@@ -1491,10 +1842,14 @@ static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
u16 dev_features;
dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
- ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features);
+ ndev->mvdev.mlx_features |= mlx_to_vritio_features(dev_features);
if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ);
+
print_features(mvdev, ndev->mvdev.mlx_features, false);
return ndev->mvdev.mlx_features;
}
@@ -1507,17 +1862,29 @@ static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
return 0;
}
-static int setup_virtqueues(struct mlx5_vdpa_net *ndev)
+static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
int err;
int i;
- for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
+ for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) {
err = setup_vq(ndev, &ndev->vqs[i]);
if (err)
goto err_vq;
}
+ if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
+ err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
+ MLX5_CVQ_MAX_ENT, false,
+ (struct vring_desc *)(uintptr_t)cvq->desc_addr,
+ (struct vring_avail *)(uintptr_t)cvq->driver_addr,
+ (struct vring_used *)(uintptr_t)cvq->device_addr);
+ if (err)
+ goto err_vq;
+ }
+
return 0;
err_vq:
@@ -1541,16 +1908,22 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
}
}
-/* TODO: cross-endian support */
-static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
-{
- return virtio_legacy_is_little_endian() ||
- (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
-}
-
-static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
{
- return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+ if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) {
+ if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) {
+ /* MQ supported. CVQ index is right above the last data virtqueue's */
+ mvdev->max_idx = mvdev->max_vqs;
+ } else {
+ /* Only CVQ supported. Data virtqueues occupy indices 0 and 1;
+ * CVQ gets index 2.
+ */
+ mvdev->max_idx = 2;
+ }
+ } else {
+ /* Two data virtqueues only: one for rx and one for tx */
+ mvdev->max_idx = 1;
+ }
}
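update_cvq_info() pins down the index map that the is_index_valid()/is_ctrl_vq_idx() checks used throughout the ops rely on: with MQ and max_vqs = 16, data VQs occupy indices 0..15 and the CVQ sits at 16. A hedged sketch of those two helpers (they are defined earlier in this file and may differ in detail):

static bool is_index_valid_sketch(struct mlx5_vdpa_dev *mvdev, u16 idx)
{
	/* max_idx is the control VQ's index when CVQ is negotiated,
	 * else the index of the last data VQ */
	return idx <= mvdev->max_idx;
}

static bool is_ctrl_vq_idx_sketch(struct mlx5_vdpa_dev *mvdev, u16 idx)
{
	return (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) &&
	       idx == mvdev->max_idx;
}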
static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
@@ -1568,6 +1941,7 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
+ update_cvq_info(mvdev);
return err;
}
@@ -1605,15 +1979,14 @@ static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
struct mlx5_vq_restore_info *ri = &mvq->ri;
- struct mlx5_virtq_attr attr;
+ struct mlx5_virtq_attr attr = {};
int err;
- if (!mvq->initialized)
- return 0;
-
- err = query_virtqueue(ndev, mvq, &attr);
- if (err)
- return err;
+ if (mvq->initialized) {
+ err = query_virtqueue(ndev, mvq, &attr);
+ if (err)
+ return err;
+ }
ri->avail_index = attr.available_index;
ri->used_index = attr.used_index;
@@ -1622,7 +1995,6 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
ri->desc_addr = mvq->desc_addr;
ri->device_addr = mvq->device_addr;
ri->driver_addr = mvq->driver_addr;
- ri->cb = mvq->event_cb;
ri->restore = true;
return 0;
}
@@ -1667,12 +2039,12 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
mvq->desc_addr = ri->desc_addr;
mvq->device_addr = ri->device_addr;
mvq->driver_addr = ri->driver_addr;
- mvq->event_cb = ri->cb;
}
}
-static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
suspend_vqs(ndev);
@@ -1681,58 +2053,59 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *
goto err_mr;
teardown_driver(ndev);
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
- err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
+ mlx5_vdpa_destroy_mr(mvdev);
+ err = mlx5_vdpa_create_mr(mvdev, iotlb);
if (err)
goto err_mr;
- if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
+ if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
return 0;
restore_channels_info(ndev);
- err = setup_driver(ndev);
+ err = setup_driver(mvdev);
if (err)
goto err_setup;
return 0;
err_setup:
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ mlx5_vdpa_destroy_mr(mvdev);
err_mr:
return err;
}
-static int setup_driver(struct mlx5_vdpa_net *ndev)
+static int setup_driver(struct mlx5_vdpa_dev *mvdev)
{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
mutex_lock(&ndev->reslock);
if (ndev->setup) {
- mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n");
+ mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
err = 0;
goto out;
}
- err = setup_virtqueues(ndev);
+ err = setup_virtqueues(mvdev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
+ mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
goto out;
}
err = create_rqt(ndev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
+ mlx5_vdpa_warn(mvdev, "create_rqt\n");
goto err_rqt;
}
err = create_tir(ndev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
+ mlx5_vdpa_warn(mvdev, "create_tir\n");
goto err_tir;
}
err = add_fwd_to_tir(ndev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
+ mlx5_vdpa_warn(mvdev, "add_fwd_to_tir\n");
goto err_fwd;
}
ndev->setup = true;
@@ -1781,24 +2154,10 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
int err;
print_status(mvdev, status, true);
- if (!status) {
- mlx5_vdpa_info(mvdev, "performing device reset\n");
- teardown_driver(ndev);
- clear_vqs_ready(ndev);
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
- ndev->mvdev.status = 0;
- ndev->mvdev.mlx_features = 0;
- ++mvdev->generation;
- if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
- if (mlx5_vdpa_create_mr(mvdev, NULL))
- mlx5_vdpa_warn(mvdev, "create MR failed\n");
- }
- return;
- }
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
- err = setup_driver(ndev);
+ err = setup_driver(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
goto err_setup;
@@ -1817,6 +2176,29 @@ err_setup:
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
}
+static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ print_status(mvdev, 0, true);
+ mlx5_vdpa_info(mvdev, "performing device reset\n");
+ teardown_driver(ndev);
+ clear_vqs_ready(ndev);
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ ndev->mvdev.status = 0;
+ ndev->mvdev.mlx_features = 0;
+ memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
+ ndev->mvdev.actual_features = 0;
+ ++mvdev->generation;
+ if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ if (mlx5_vdpa_create_mr(mvdev, NULL))
+ mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ }
+
+ return 0;
+}
+
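Splitting reset out of set_status(status == 0) means the vdpa core now drives it through the new .reset op. A sketch of the core-side wrapper, modeled on vdpa_reset() in include/linux/vdpa.h (details may differ):

static inline int vdpa_reset_sketch(struct vdpa_device *vdev)
{
	const struct vdpa_config_ops *ops = vdev->config;

	vdev->features_valid = false;	/* features must be renegotiated */
	return ops->reset(vdev);
}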
static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev)
{
return sizeof(struct virtio_net_config);
@@ -1848,7 +2230,6 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
@@ -1859,7 +2240,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
}
if (change_map)
- return mlx5_vdpa_change_map(ndev, iotlb);
+ return mlx5_vdpa_change_map(mvdev, iotlb);
return 0;
}
@@ -1889,6 +2270,9 @@ static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device
struct mlx5_vdpa_net *ndev;
phys_addr_t addr;
+ if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
+ return ret;
+
/* If SF BAR size is smaller than PAGE_SIZE, do not use direct
* notification to avoid the risk of mapping pages that contain BAR of more
* than one SF
@@ -1928,6 +2312,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vendor_id = mlx5_vdpa_get_vendor_id,
.get_status = mlx5_vdpa_get_status,
.set_status = mlx5_vdpa_set_status,
+ .reset = mlx5_vdpa_reset,
.get_config_size = mlx5_vdpa_get_config_size,
.get_config = mlx5_vdpa_get_config,
.set_config = mlx5_vdpa_set_config,
@@ -2040,7 +2425,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- name);
+ name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -2063,8 +2448,11 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
err = mlx5_mpfs_add_mac(pfmdev, config->mac);
if (err)
goto err_mtu;
+
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
}
+ config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
@@ -2080,8 +2468,15 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
if (err)
goto err_mr;
+ mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_ctrl_wq");
+ if (!mvdev->wq) {
+ err = -ENOMEM;
+ goto err_res2;
+ }
+
+ ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs);
mvdev->vdev.mdev = &mgtdev->mgtdev;
- err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
+ err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1);
if (err)
goto err_reg;
@@ -2089,6 +2484,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
return 0;
err_reg:
+ destroy_workqueue(mvdev->wq);
+err_res2:
free_resources(ndev);
err_mr:
mlx5_vdpa_destroy_mr(mvdev);
@@ -2106,7 +2503,9 @@ err_mtu:
static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
+ destroy_workqueue(mvdev->wq);
_vdpa_unregister_device(dev);
mgtdev->ndev = NULL;
}
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 3fc4525fc05c..1dc121a07a93 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -69,6 +69,7 @@ static void vdpa_release_dev(struct device *d)
* @config: the bus operations that is supported by this device
* @size: size of the parent structure that contains private data
* @name: name of the vdpa device; optional.
+ * @use_va: indicates whether virtual addresses must be used by this device
*
* Driver should use vdpa_alloc_device() wrapper macro instead of
* using this directly.
@@ -78,7 +79,8 @@ static void vdpa_release_dev(struct device *d)
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- size_t size, const char *name)
+ size_t size, const char *name,
+ bool use_va)
{
struct vdpa_device *vdev;
int err = -EINVAL;
@@ -89,6 +91,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (!!config->dma_map != !!config->dma_unmap)
goto err;
+ /* This should only work for devices that use an on-chip IOMMU */
+ if (use_va && !(config->dma_map || config->set_map))
+ goto err;
+
err = -ENOMEM;
vdev = kzalloc(size, GFP_KERNEL);
if (!vdev)
@@ -104,6 +110,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->index = err;
vdev->config = config;
vdev->features_valid = false;
+ vdev->use_va = use_va;
if (name)
err = dev_set_name(&vdev->dev, "%s", name);
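For parent drivers the change is mechanical: the vdpa_alloc_device() wrapper macro grows a trailing use_va argument, as in the two call-site updates in this patch. A hedged call-site sketch (my_vdpa and my_config_ops are hypothetical):

/* Hypothetical parent driver structure; the vdpa member must come
 * first, since the wrapper macro checks its offset is zero. */
struct my_vdpa {
	struct vdpa_device vdpa;
	/* driver-private state */
};

static struct my_vdpa *my_alloc(struct device *parent_dev,
				const struct vdpa_config_ops *my_config_ops)
{
	/* false = keep DMA/physical addressing, as before; a device
	 * backed by an on-chip IOMMU (set_map/dma_map) may pass true */
	return vdpa_alloc_device(struct my_vdpa, vdpa, parent_dev,
				 my_config_ops, "my-vdpa-0", false);
}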
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index c621cf7feec0..5f484fff8dbe 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -92,7 +92,7 @@ static void vdpasim_vq_reset(struct vdpasim *vdpasim,
vq->vring.notify = NULL;
}
-static void vdpasim_reset(struct vdpasim *vdpasim)
+static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
int i;
@@ -137,7 +137,8 @@ static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
int ret;
/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
- iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);
+ iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
+ ULONG_MAX - 1, true);
if (!iova)
return DMA_MAPPING_ERROR;
@@ -250,7 +251,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
ops = &vdpasim_config_ops;
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
- dev_attr->name);
+ dev_attr->name, false);
if (IS_ERR(vdpasim)) {
ret = PTR_ERR(vdpasim);
goto err_alloc;
@@ -459,11 +460,21 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
spin_lock(&vdpasim->lock);
vdpasim->status = status;
- if (status == 0)
- vdpasim_reset(vdpasim);
spin_unlock(&vdpasim->lock);
}
+static int vdpasim_reset(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ spin_lock(&vdpasim->lock);
+ vdpasim->status = 0;
+ vdpasim_do_reset(vdpasim);
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -544,14 +555,14 @@ err:
}
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
- u64 pa, u32 perm)
+ u64 pa, u32 perm, void *opaque)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
int ret;
spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
- perm);
+ ret = vhost_iotlb_add_range_ctx(vdpasim->iommu, iova, iova + size - 1,
+ pa, perm, opaque);
spin_unlock(&vdpasim->iommu_lock);
return ret;
@@ -607,6 +618,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .reset = vdpasim_reset,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
@@ -635,6 +647,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .reset = vdpasim_reset,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
diff --git a/drivers/vdpa/vdpa_user/Makefile b/drivers/vdpa/vdpa_user/Makefile
new file mode 100644
index 000000000000..260e0b26af99
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+vduse-y := vduse_dev.o iova_domain.o
+
+obj-$(CONFIG_VDPA_USER) += vduse.o
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
new file mode 100644
index 000000000000..1daae2608860
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MMU-based software IOTLB.
+ *
+ * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
+ *
+ * Author: Xie Yongji <xieyongji@bytedance.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <linux/vdpa.h>
+
+#include "iova_domain.h"
+
+static int vduse_iotlb_add_range(struct vduse_iova_domain *domain,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm,
+ struct file *file, u64 offset)
+{
+ struct vdpa_map_file *map_file;
+ int ret;
+
+ map_file = kmalloc(sizeof(*map_file), GFP_ATOMIC);
+ if (!map_file)
+ return -ENOMEM;
+
+ map_file->file = get_file(file);
+ map_file->offset = offset;
+
+ ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last,
+ addr, perm, map_file);
+ if (ret) {
+ fput(map_file->file);
+ kfree(map_file);
+ return ret;
+ }
+ return 0;
+}
+
+static void vduse_iotlb_del_range(struct vduse_iova_domain *domain,
+ u64 start, u64 last)
+{
+ struct vdpa_map_file *map_file;
+ struct vhost_iotlb_map *map;
+
+ while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ vhost_iotlb_map_free(domain->iotlb, map);
+ }
+}
+
+int vduse_domain_set_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb)
+{
+ struct vdpa_map_file *map_file;
+ struct vhost_iotlb_map *map;
+ u64 start = 0ULL, last = ULLONG_MAX;
+ int ret;
+
+ spin_lock(&domain->iotlb_lock);
+ vduse_iotlb_del_range(domain, start, last);
+
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ ret = vduse_iotlb_add_range(domain, map->start, map->last,
+ map->addr, map->perm,
+ map_file->file,
+ map_file->offset);
+ if (ret)
+ goto err;
+ }
+ spin_unlock(&domain->iotlb_lock);
+
+ return 0;
+err:
+ vduse_iotlb_del_range(domain, start, last);
+ spin_unlock(&domain->iotlb_lock);
+ return ret;
+}
+
+void vduse_domain_clear_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb)
+{
+ struct vhost_iotlb_map *map;
+ u64 start = 0ULL, last = ULLONG_MAX;
+
+ spin_lock(&domain->iotlb_lock);
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ vduse_iotlb_del_range(domain, map->start, map->last);
+ }
+ spin_unlock(&domain->iotlb_lock);
+}
+
+static int vduse_domain_map_bounce_page(struct vduse_iova_domain *domain,
+ u64 iova, u64 size, u64 paddr)
+{
+ struct vduse_bounce_map *map;
+ u64 last = iova + size - 1;
+
+ while (iova <= last) {
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ if (!map->bounce_page) {
+ map->bounce_page = alloc_page(GFP_ATOMIC);
+ if (!map->bounce_page)
+ return -ENOMEM;
+ }
+ map->orig_phys = paddr;
+ paddr += PAGE_SIZE;
+ iova += PAGE_SIZE;
+ }
+ return 0;
+}
+
+static void vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain,
+ u64 iova, u64 size)
+{
+ struct vduse_bounce_map *map;
+ u64 last = iova + size - 1;
+
+ while (iova <= last) {
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ map->orig_phys = INVALID_PHYS_ADDR;
+ iova += PAGE_SIZE;
+ }
+}
+
+static void do_bounce(phys_addr_t orig, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(orig);
+ unsigned int offset = offset_in_page(orig);
+ char *buffer;
+ unsigned int sz = 0;
+
+ while (size) {
+ sz = min_t(size_t, PAGE_SIZE - offset, size);
+
+ buffer = kmap_atomic(pfn_to_page(pfn));
+ if (dir == DMA_TO_DEVICE)
+ memcpy(addr, buffer + offset, sz);
+ else
+ memcpy(buffer + offset, addr, sz);
+ kunmap_atomic(buffer);
+
+ size -= sz;
+ pfn++;
+ addr += sz;
+ offset = 0;
+ }
+}
+
+static void vduse_domain_bounce(struct vduse_iova_domain *domain,
+ dma_addr_t iova, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vduse_bounce_map *map;
+ unsigned int offset;
+ void *addr;
+ size_t sz;
+
+ if (iova >= domain->bounce_size)
+ return;
+
+ while (size) {
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ offset = offset_in_page(iova);
+ sz = min_t(size_t, PAGE_SIZE - offset, size);
+
+ if (WARN_ON(!map->bounce_page ||
+ map->orig_phys == INVALID_PHYS_ADDR))
+ return;
+
+ addr = page_address(map->bounce_page) + offset;
+ do_bounce(map->orig_phys + offset, addr, sz, dir);
+ size -= sz;
+ iova += sz;
+ }
+}
+
+static struct page *
+vduse_domain_get_coherent_page(struct vduse_iova_domain *domain, u64 iova)
+{
+ u64 start = iova & PAGE_MASK;
+ u64 last = start + PAGE_SIZE - 1;
+ struct vhost_iotlb_map *map;
+ struct page *page = NULL;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb, start, last);
+ if (!map)
+ goto out;
+
+ page = pfn_to_page((map->addr + iova - map->start) >> PAGE_SHIFT);
+ get_page(page);
+out:
+ spin_unlock(&domain->iotlb_lock);
+
+ return page;
+}
+
+static struct page *
+vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
+{
+ struct vduse_bounce_map *map;
+ struct page *page = NULL;
+
+ spin_lock(&domain->iotlb_lock);
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ if (!map->bounce_page)
+ goto out;
+
+ page = map->bounce_page;
+ get_page(page);
+out:
+ spin_unlock(&domain->iotlb_lock);
+
+ return page;
+}
+
+static void
+vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
+{
+ struct vduse_bounce_map *map;
+ unsigned long pfn, bounce_pfns;
+
+ bounce_pfns = domain->bounce_size >> PAGE_SHIFT;
+
+ for (pfn = 0; pfn < bounce_pfns; pfn++) {
+ map = &domain->bounce_maps[pfn];
+ if (WARN_ON(map->orig_phys != INVALID_PHYS_ADDR))
+ continue;
+
+ if (!map->bounce_page)
+ continue;
+
+ __free_page(map->bounce_page);
+ map->bounce_page = NULL;
+ }
+}
+
+void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
+{
+ if (!domain->bounce_map)
+ return;
+
+ spin_lock(&domain->iotlb_lock);
+ if (!domain->bounce_map)
+ goto unlock;
+
+ vduse_iotlb_del_range(domain, 0, domain->bounce_size - 1);
+ domain->bounce_map = 0;
+unlock:
+ spin_unlock(&domain->iotlb_lock);
+}
+
+static int vduse_domain_init_bounce_map(struct vduse_iova_domain *domain)
+{
+ int ret = 0;
+
+ if (domain->bounce_map)
+ return 0;
+
+ spin_lock(&domain->iotlb_lock);
+ if (domain->bounce_map)
+ goto unlock;
+
+ ret = vduse_iotlb_add_range(domain, 0, domain->bounce_size - 1,
+ 0, VHOST_MAP_RW, domain->file, 0);
+ if (ret)
+ goto unlock;
+
+ domain->bounce_map = 1;
+unlock:
+ spin_unlock(&domain->iotlb_lock);
+ return ret;
+}
+
+static dma_addr_t
+vduse_domain_alloc_iova(struct iova_domain *iovad,
+ unsigned long size, unsigned long limit)
+{
+ unsigned long shift = iova_shift(iovad);
+ unsigned long iova_len = iova_align(iovad, size) >> shift;
+ unsigned long iova_pfn;
+
+ /*
+ * Freeing non-power-of-two-sized allocations back into the IOVA caches
+ * will come back to bite us badly, so we have to waste a bit of space
+ * rounding up anything cacheable to make sure that can't happen. The
+ * order of the unadjusted size will still match upon freeing.
+ */
+ if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+ iova_len = roundup_pow_of_two(iova_len);
+ iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
+
+ return iova_pfn << shift;
+}
+
+static void vduse_domain_free_iova(struct iova_domain *iovad,
+ dma_addr_t iova, size_t size)
+{
+ unsigned long shift = iova_shift(iovad);
+ unsigned long iova_len = iova_align(iovad, size) >> shift;
+
+ free_iova_fast(iovad, iova >> shift, iova_len);
+}
+
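A worked example of the allocation rounding above, assuming a 4 KiB IOVA granule (iova_shift() == 12):

/* A 12 KiB streaming mapping: */
unsigned long iova_len = iova_align(iovad, 12288) >> 12;	/* 3 granules */

/* 3 < 1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1), so round up: */
iova_len = roundup_pow_of_two(iova_len);			/* 4 granules */

Per the comment in vduse_domain_alloc_iova(), the unadjusted size recomputed on free still lands in the same power-of-two cache order, so the IOVA caches stay consistent.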
+dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
+ struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->stream_iovad;
+ unsigned long limit = domain->bounce_size - 1;
+ phys_addr_t pa = page_to_phys(page) + offset;
+ dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
+
+ if (!iova)
+ return DMA_MAPPING_ERROR;
+
+ if (vduse_domain_init_bounce_map(domain))
+ goto err;
+
+ if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
+ goto err;
+
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
+
+ return iova;
+err:
+ vduse_domain_free_iova(iovad, iova, size);
+ return DMA_MAPPING_ERROR;
+}
+
+void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->stream_iovad;
+
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
+
+ vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
+ vduse_domain_free_iova(iovad, dma_addr, size);
+}
+
+void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+ size_t size, dma_addr_t *dma_addr,
+ gfp_t flag, unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->consistent_iovad;
+ unsigned long limit = domain->iova_limit;
+ dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
+ void *orig = alloc_pages_exact(size, flag);
+
+ if (!iova || !orig)
+ goto err;
+
+ spin_lock(&domain->iotlb_lock);
+ if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
+ virt_to_phys(orig), VHOST_MAP_RW,
+ domain->file, (u64)iova)) {
+ spin_unlock(&domain->iotlb_lock);
+ goto err;
+ }
+ spin_unlock(&domain->iotlb_lock);
+
+ *dma_addr = iova;
+
+ return orig;
+err:
+ *dma_addr = DMA_MAPPING_ERROR;
+ if (orig)
+ free_pages_exact(orig, size);
+ if (iova)
+ vduse_domain_free_iova(iovad, iova, size);
+
+ return NULL;
+}
+
+void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->consistent_iovad;
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+ phys_addr_t pa;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+ if (WARN_ON(!map)) {
+ spin_unlock(&domain->iotlb_lock);
+ return;
+ }
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ pa = map->addr;
+ vhost_iotlb_map_free(domain->iotlb, map);
+ spin_unlock(&domain->iotlb_lock);
+
+ vduse_domain_free_iova(iovad, dma_addr, size);
+ free_pages_exact(phys_to_virt(pa), size);
+}
+
+static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)
+{
+ struct vduse_iova_domain *domain = vmf->vma->vm_private_data;
+ unsigned long iova = vmf->pgoff << PAGE_SHIFT;
+ struct page *page;
+
+ if (!domain)
+ return VM_FAULT_SIGBUS;
+
+ if (iova < domain->bounce_size)
+ page = vduse_domain_get_bounce_page(domain, iova);
+ else
+ page = vduse_domain_get_coherent_page(domain, iova);
+
+ if (!page)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = page;
+
+ return 0;
+}
+
+static const struct vm_operations_struct vduse_domain_mmap_ops = {
+ .fault = vduse_domain_mmap_fault,
+};
+
+static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vduse_iova_domain *domain = file->private_data;
+
+ vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;
+ vma->vm_private_data = domain;
+ vma->vm_ops = &vduse_domain_mmap_ops;
+
+ return 0;
+}
+
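This fault handler backs userspace mappings of the fd that VDUSE_IOTLB_GET_FD returns (see vduse_dev_ioctl() below). A hedged sketch of the consumer side; in real code the PROT_* flags should honour the entry's perm field:

static void *map_iotlb_region_sketch(int iotlb_fd, struct vduse_iotlb_entry *e)
{
	/* Faults resolve to bounce pages below bounce_size and to the
	 * coherent pages tracked in the IOTLB above it. */
	return mmap(NULL, e->last - e->start + 1, PROT_READ | PROT_WRITE,
		    MAP_SHARED, iotlb_fd, e->offset);
}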
+static int vduse_domain_release(struct inode *inode, struct file *file)
+{
+ struct vduse_iova_domain *domain = file->private_data;
+
+ spin_lock(&domain->iotlb_lock);
+ vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
+ vduse_domain_free_bounce_pages(domain);
+ spin_unlock(&domain->iotlb_lock);
+ put_iova_domain(&domain->stream_iovad);
+ put_iova_domain(&domain->consistent_iovad);
+ vhost_iotlb_free(domain->iotlb);
+ vfree(domain->bounce_maps);
+ kfree(domain);
+
+ return 0;
+}
+
+static const struct file_operations vduse_domain_fops = {
+ .owner = THIS_MODULE,
+ .mmap = vduse_domain_mmap,
+ .release = vduse_domain_release,
+};
+
+void vduse_domain_destroy(struct vduse_iova_domain *domain)
+{
+ fput(domain->file);
+}
+
+struct vduse_iova_domain *
+vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
+{
+ struct vduse_iova_domain *domain;
+ struct file *file;
+ struct vduse_bounce_map *map;
+ unsigned long pfn, bounce_pfns;
+
+ bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
+ if (iova_limit <= bounce_size)
+ return NULL;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->iotlb = vhost_iotlb_alloc(0, 0);
+ if (!domain->iotlb)
+ goto err_iotlb;
+
+ domain->iova_limit = iova_limit;
+ domain->bounce_size = PAGE_ALIGN(bounce_size);
+ domain->bounce_maps = vzalloc(bounce_pfns *
+ sizeof(struct vduse_bounce_map));
+ if (!domain->bounce_maps)
+ goto err_map;
+
+ for (pfn = 0; pfn < bounce_pfns; pfn++) {
+ map = &domain->bounce_maps[pfn];
+ map->orig_phys = INVALID_PHYS_ADDR;
+ }
+ file = anon_inode_getfile("[vduse-domain]", &vduse_domain_fops,
+ domain, O_RDWR);
+ if (IS_ERR(file))
+ goto err_file;
+
+ domain->file = file;
+ spin_lock_init(&domain->iotlb_lock);
+ init_iova_domain(&domain->stream_iovad,
+ PAGE_SIZE, IOVA_START_PFN);
+ init_iova_domain(&domain->consistent_iovad,
+ PAGE_SIZE, bounce_pfns);
+
+ return domain;
+err_file:
+ vfree(domain->bounce_maps);
+err_map:
+ vhost_iotlb_free(domain->iotlb);
+err_iotlb:
+ kfree(domain);
+ return NULL;
+}
+
+int vduse_domain_init(void)
+{
+ return iova_cache_get();
+}
+
+void vduse_domain_exit(void)
+{
+ iova_cache_put();
+}
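A hedged sketch of the domain lifecycle as vduse_dev.c drives it; the sizes mirror VDUSE_IOVA_SIZE and VDUSE_BOUNCE_SIZE defined there, and error handling (including the DMA_MAPPING_ERROR check) is trimmed:

static int domain_lifecycle_sketch(struct page *page)
{
	struct vduse_iova_domain *domain;
	dma_addr_t iova;

	domain = vduse_domain_create(128 * 1024 * 1024 /* iova_limit */,
				     64 * 1024 * 1024  /* bounce_size */);
	if (!domain)
		return -ENOMEM;

	/* Streaming DMA bounces through the preallocated page array: */
	iova = vduse_domain_map_page(domain, page, 0, PAGE_SIZE,
				     DMA_TO_DEVICE, 0);
	/* ... device side consumes the bounce copy via its mapping ... */
	vduse_domain_unmap_page(domain, iova, PAGE_SIZE, DMA_TO_DEVICE, 0);

	/* Only drops the file reference; actual teardown happens in
	 * vduse_domain_release() on the final fput(). */
	vduse_domain_destroy(domain);
	return 0;
}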
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
new file mode 100644
index 000000000000..2722d9b8e21a
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MMU-based software IOTLB.
+ *
+ * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
+ *
+ * Author: Xie Yongji <xieyongji@bytedance.com>
+ *
+ */
+
+#ifndef _VDUSE_IOVA_DOMAIN_H
+#define _VDUSE_IOVA_DOMAIN_H
+
+#include <linux/iova.h>
+#include <linux/dma-mapping.h>
+#include <linux/vhost_iotlb.h>
+
+#define IOVA_START_PFN 1
+
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+
+struct vduse_bounce_map {
+ struct page *bounce_page;
+ u64 orig_phys;
+};
+
+struct vduse_iova_domain {
+ struct iova_domain stream_iovad;
+ struct iova_domain consistent_iovad;
+ struct vduse_bounce_map *bounce_maps;
+ size_t bounce_size;
+ unsigned long iova_limit;
+ int bounce_map;
+ struct vhost_iotlb *iotlb;
+ spinlock_t iotlb_lock;
+ struct file *file;
+};
+
+int vduse_domain_set_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb);
+
+void vduse_domain_clear_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb);
+
+dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
+ struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+
+void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+
+void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+ size_t size, dma_addr_t *dma_addr,
+ gfp_t flag, unsigned long attrs);
+
+void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs);
+
+void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
+
+void vduse_domain_destroy(struct vduse_iova_domain *domain);
+
+struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit,
+ size_t bounce_size);
+
+int vduse_domain_init(void);
+
+void vduse_domain_exit(void);
+
+#endif /* _VDUSE_IOVA_DOMAIN_H */
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
new file mode 100644
index 000000000000..29a38ecba19e
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -0,0 +1,1641 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDUSE: vDPA Device in Userspace
+ *
+ * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
+ *
+ * Author: Xie Yongji <xieyongji@bytedance.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/dma-map-ops.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/vdpa.h>
+#include <linux/nospec.h>
+#include <uapi/linux/vduse.h>
+#include <uapi/linux/vdpa.h>
+#include <uapi/linux/virtio_config.h>
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_blk.h>
+#include <linux/mod_devicetable.h>
+
+#include "iova_domain.h"
+
+#define DRV_AUTHOR "Yongji Xie <xieyongji@bytedance.com>"
+#define DRV_DESC "vDPA Device in Userspace"
+#define DRV_LICENSE "GPL v2"
+
+#define VDUSE_DEV_MAX (1U << MINORBITS)
+#define VDUSE_BOUNCE_SIZE (64 * 1024 * 1024)
+#define VDUSE_IOVA_SIZE (128 * 1024 * 1024)
+#define VDUSE_MSG_DEFAULT_TIMEOUT 30
+
+struct vduse_virtqueue {
+ u16 index;
+ u16 num_max;
+ u32 num;
+ u64 desc_addr;
+ u64 driver_addr;
+ u64 device_addr;
+ struct vdpa_vq_state state;
+ bool ready;
+ bool kicked;
+ spinlock_t kick_lock;
+ spinlock_t irq_lock;
+ struct eventfd_ctx *kickfd;
+ struct vdpa_callback cb;
+ struct work_struct inject;
+ struct work_struct kick;
+};
+
+struct vduse_dev;
+
+struct vduse_vdpa {
+ struct vdpa_device vdpa;
+ struct vduse_dev *dev;
+};
+
+struct vduse_dev {
+ struct vduse_vdpa *vdev;
+ struct device *dev;
+ struct vduse_virtqueue *vqs;
+ struct vduse_iova_domain *domain;
+ char *name;
+ struct mutex lock;
+ spinlock_t msg_lock;
+ u64 msg_unique;
+ u32 msg_timeout;
+ wait_queue_head_t waitq;
+ struct list_head send_list;
+ struct list_head recv_list;
+ struct vdpa_callback config_cb;
+ struct work_struct inject;
+ spinlock_t irq_lock;
+ int minor;
+ bool broken;
+ bool connected;
+ u64 api_version;
+ u64 device_features;
+ u64 driver_features;
+ u32 device_id;
+ u32 vendor_id;
+ u32 generation;
+ u32 config_size;
+ void *config;
+ u8 status;
+ u32 vq_num;
+ u32 vq_align;
+};
+
+struct vduse_dev_msg {
+ struct vduse_dev_request req;
+ struct vduse_dev_response resp;
+ struct list_head list;
+ wait_queue_head_t waitq;
+ bool completed;
+};
+
+struct vduse_control {
+ u64 api_version;
+};
+
+static DEFINE_MUTEX(vduse_lock);
+static DEFINE_IDR(vduse_idr);
+
+static dev_t vduse_major;
+static struct class *vduse_class;
+static struct cdev vduse_ctrl_cdev;
+static struct cdev vduse_cdev;
+static struct workqueue_struct *vduse_irq_wq;
+
+static u32 allowed_device_id[] = {
+ VIRTIO_ID_BLOCK,
+};
+
+static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa)
+{
+ struct vduse_vdpa *vdev = container_of(vdpa, struct vduse_vdpa, vdpa);
+
+ return vdev->dev;
+}
+
+static inline struct vduse_dev *dev_to_vduse(struct device *dev)
+{
+ struct vdpa_device *vdpa = dev_to_vdpa(dev);
+
+ return vdpa_to_vduse(vdpa);
+}
+
+static struct vduse_dev_msg *vduse_find_msg(struct list_head *head,
+ uint32_t request_id)
+{
+ struct vduse_dev_msg *msg;
+
+ list_for_each_entry(msg, head, list) {
+ if (msg->req.request_id == request_id) {
+ list_del(&msg->list);
+ return msg;
+ }
+ }
+
+ return NULL;
+}
+
+static struct vduse_dev_msg *vduse_dequeue_msg(struct list_head *head)
+{
+ struct vduse_dev_msg *msg = NULL;
+
+ if (!list_empty(head)) {
+ msg = list_first_entry(head, struct vduse_dev_msg, list);
+ list_del(&msg->list);
+ }
+
+ return msg;
+}
+
+static void vduse_enqueue_msg(struct list_head *head,
+ struct vduse_dev_msg *msg)
+{
+ list_add_tail(&msg->list, head);
+}
+
+static void vduse_dev_broken(struct vduse_dev *dev)
+{
+ struct vduse_dev_msg *msg, *tmp;
+
+ if (unlikely(dev->broken))
+ return;
+
+ list_splice_init(&dev->recv_list, &dev->send_list);
+ list_for_each_entry_safe(msg, tmp, &dev->send_list, list) {
+ list_del(&msg->list);
+ msg->completed = 1;
+ msg->resp.result = VDUSE_REQ_RESULT_FAILED;
+ wake_up(&msg->waitq);
+ }
+ dev->broken = true;
+ wake_up(&dev->waitq);
+}
+
+static int vduse_dev_msg_sync(struct vduse_dev *dev,
+ struct vduse_dev_msg *msg)
+{
+ int ret;
+
+ if (unlikely(dev->broken))
+ return -EIO;
+
+ init_waitqueue_head(&msg->waitq);
+ spin_lock(&dev->msg_lock);
+ if (unlikely(dev->broken)) {
+ spin_unlock(&dev->msg_lock);
+ return -EIO;
+ }
+ msg->req.request_id = dev->msg_unique++;
+ vduse_enqueue_msg(&dev->send_list, msg);
+ wake_up(&dev->waitq);
+ spin_unlock(&dev->msg_lock);
+ if (dev->msg_timeout)
+ ret = wait_event_killable_timeout(msg->waitq, msg->completed,
+ (long)dev->msg_timeout * HZ);
+ else
+ ret = wait_event_killable(msg->waitq, msg->completed);
+
+ spin_lock(&dev->msg_lock);
+ if (!msg->completed) {
+ list_del(&msg->list);
+ msg->resp.result = VDUSE_REQ_RESULT_FAILED;
+ /* Mark the device as malfunctioning when there is a timeout */
+ if (!ret)
+ vduse_dev_broken(dev);
+ }
+ ret = (msg->resp.result == VDUSE_REQ_RESULT_OK) ? 0 : -EIO;
+ spin_unlock(&dev->msg_lock);
+
+ return ret;
+}
+
+static int vduse_dev_get_vq_state_packed(struct vduse_dev *dev,
+ struct vduse_virtqueue *vq,
+ struct vdpa_vq_state_packed *packed)
+{
+ struct vduse_dev_msg msg = { 0 };
+ int ret;
+
+ msg.req.type = VDUSE_GET_VQ_STATE;
+ msg.req.vq_state.index = vq->index;
+
+ ret = vduse_dev_msg_sync(dev, &msg);
+ if (ret)
+ return ret;
+
+ packed->last_avail_counter =
+ msg.resp.vq_state.packed.last_avail_counter & 0x0001;
+ packed->last_avail_idx =
+ msg.resp.vq_state.packed.last_avail_idx & 0x7FFF;
+ packed->last_used_counter =
+ msg.resp.vq_state.packed.last_used_counter & 0x0001;
+ packed->last_used_idx =
+ msg.resp.vq_state.packed.last_used_idx & 0x7FFF;
+
+ return 0;
+}
+
+static int vduse_dev_get_vq_state_split(struct vduse_dev *dev,
+ struct vduse_virtqueue *vq,
+ struct vdpa_vq_state_split *split)
+{
+ struct vduse_dev_msg msg = { 0 };
+ int ret;
+
+ msg.req.type = VDUSE_GET_VQ_STATE;
+ msg.req.vq_state.index = vq->index;
+
+ ret = vduse_dev_msg_sync(dev, &msg);
+ if (ret)
+ return ret;
+
+ split->avail_index = msg.resp.vq_state.split.avail_index;
+
+ return 0;
+}
+
+static int vduse_dev_set_status(struct vduse_dev *dev, u8 status)
+{
+ struct vduse_dev_msg msg = { 0 };
+
+ msg.req.type = VDUSE_SET_STATUS;
+ msg.req.s.status = status;
+
+ return vduse_dev_msg_sync(dev, &msg);
+}
+
+static int vduse_dev_update_iotlb(struct vduse_dev *dev,
+ u64 start, u64 last)
+{
+ struct vduse_dev_msg msg = { 0 };
+
+ if (last < start)
+ return -EINVAL;
+
+ msg.req.type = VDUSE_UPDATE_IOTLB;
+ msg.req.iova.start = start;
+ msg.req.iova.last = last;
+
+ return vduse_dev_msg_sync(dev, &msg);
+}
+
+static ssize_t vduse_dev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vduse_dev *dev = file->private_data;
+ struct vduse_dev_msg *msg;
+ int size = sizeof(struct vduse_dev_request);
+ ssize_t ret;
+
+ if (iov_iter_count(to) < size)
+ return -EINVAL;
+
+ spin_lock(&dev->msg_lock);
+ while (1) {
+ msg = vduse_dequeue_msg(&dev->send_list);
+ if (msg)
+ break;
+
+ ret = -EAGAIN;
+ if (file->f_flags & O_NONBLOCK)
+ goto unlock;
+
+ spin_unlock(&dev->msg_lock);
+ ret = wait_event_interruptible_exclusive(dev->waitq,
+ !list_empty(&dev->send_list));
+ if (ret)
+ return ret;
+
+ spin_lock(&dev->msg_lock);
+ }
+ spin_unlock(&dev->msg_lock);
+ ret = copy_to_iter(&msg->req, size, to);
+ spin_lock(&dev->msg_lock);
+ if (ret != size) {
+ ret = -EFAULT;
+ vduse_enqueue_msg(&dev->send_list, msg);
+ goto unlock;
+ }
+ vduse_enqueue_msg(&dev->recv_list, msg);
+unlock:
+ spin_unlock(&dev->msg_lock);
+
+ return ret;
+}
+
+static bool is_mem_zero(const char *ptr, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (ptr[i])
+ return false;
+ }
+ return true;
+}
+
+static ssize_t vduse_dev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vduse_dev *dev = file->private_data;
+ struct vduse_dev_response resp;
+ struct vduse_dev_msg *msg;
+ size_t ret;
+
+ ret = copy_from_iter(&resp, sizeof(resp), from);
+ if (ret != sizeof(resp))
+ return -EINVAL;
+
+ if (!is_mem_zero((const char *)resp.reserved, sizeof(resp.reserved)))
+ return -EINVAL;
+
+ spin_lock(&dev->msg_lock);
+ msg = vduse_find_msg(&dev->recv_list, resp.request_id);
+ if (!msg) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ memcpy(&msg->resp, &resp, sizeof(resp));
+ msg->completed = 1;
+ wake_up(&msg->waitq);
+unlock:
+ spin_unlock(&dev->msg_lock);
+
+ return ret;
+}
+
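Together, the read_iter/write_iter pair forms a synchronous request/response channel between the kernel and the userspace server. A hedged userspace sketch of one turn of that protocol; struct layouts come from the uapi <linux/vduse.h> added by this series, and error handling is trimmed:

#include <linux/vduse.h>
#include <unistd.h>

static void serve_one_request(int dev_fd)
{
	struct vduse_dev_request req;
	struct vduse_dev_response resp = { 0 };

	/* Blocks until the kernel queues a message, e.g. VDUSE_SET_STATUS */
	read(dev_fd, &req, sizeof(req));

	/* ... handle req.type here ... */

	/* request_id must round-trip; vduse_find_msg() matches on it */
	resp.request_id = req.request_id;
	resp.result = VDUSE_REQ_RESULT_OK;
	write(dev_fd, &resp, sizeof(resp));
}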
+static __poll_t vduse_dev_poll(struct file *file, poll_table *wait)
+{
+ struct vduse_dev *dev = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &dev->waitq, wait);
+
+ spin_lock(&dev->msg_lock);
+
+ if (unlikely(dev->broken))
+ mask |= EPOLLERR;
+ if (!list_empty(&dev->send_list))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (!list_empty(&dev->recv_list))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ spin_unlock(&dev->msg_lock);
+
+ return mask;
+}
+
+static void vduse_dev_reset(struct vduse_dev *dev)
+{
+ int i;
+ struct vduse_iova_domain *domain = dev->domain;
+
+ /* The coherent mappings are handled in vduse_dev_free_coherent() */
+ if (domain->bounce_map)
+ vduse_domain_reset_bounce_map(domain);
+
+ dev->status = 0;
+ dev->driver_features = 0;
+ dev->generation++;
+ spin_lock(&dev->irq_lock);
+ dev->config_cb.callback = NULL;
+ dev->config_cb.private = NULL;
+ spin_unlock(&dev->irq_lock);
+ flush_work(&dev->inject);
+
+ for (i = 0; i < dev->vq_num; i++) {
+ struct vduse_virtqueue *vq = &dev->vqs[i];
+
+ vq->ready = false;
+ vq->desc_addr = 0;
+ vq->driver_addr = 0;
+ vq->device_addr = 0;
+ vq->num = 0;
+ memset(&vq->state, 0, sizeof(vq->state));
+
+ spin_lock(&vq->kick_lock);
+ vq->kicked = false;
+ if (vq->kickfd)
+ eventfd_ctx_put(vq->kickfd);
+ vq->kickfd = NULL;
+ spin_unlock(&vq->kick_lock);
+
+ spin_lock(&vq->irq_lock);
+ vq->cb.callback = NULL;
+ vq->cb.private = NULL;
+ spin_unlock(&vq->irq_lock);
+ flush_work(&vq->inject);
+ flush_work(&vq->kick);
+ }
+}
+
+static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ vq->desc_addr = desc_area;
+ vq->driver_addr = driver_area;
+ vq->device_addr = device_area;
+
+ return 0;
+}
+
+static void vduse_vq_kick(struct vduse_virtqueue *vq)
+{
+ spin_lock(&vq->kick_lock);
+ if (!vq->ready)
+ goto unlock;
+
+ if (vq->kickfd)
+ eventfd_signal(vq->kickfd, 1);
+ else
+ vq->kicked = true;
+unlock:
+ spin_unlock(&vq->kick_lock);
+}
+
+static void vduse_vq_kick_work(struct work_struct *work)
+{
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, kick);
+
+ vduse_vq_kick(vq);
+}
+
+static void vduse_vdpa_kick_vq(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ if (!eventfd_signal_allowed()) {
+ schedule_work(&vq->kick);
+ return;
+ }
+ vduse_vq_kick(vq);
+}
+
+static void vduse_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_callback *cb)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ spin_lock(&vq->irq_lock);
+ vq->cb.callback = cb->callback;
+ vq->cb.private = cb->private;
+ spin_unlock(&vq->irq_lock);
+}
+
+static void vduse_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ vq->num = num;
+}
+
+static void vduse_vdpa_set_vq_ready(struct vdpa_device *vdpa,
+ u16 idx, bool ready)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ vq->ready = ready;
+}
+
+static bool vduse_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ return vq->ready;
+}
+
+static int vduse_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 idx,
+ const struct vdpa_vq_state *state)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
+ vq->state.packed.last_avail_counter =
+ state->packed.last_avail_counter;
+ vq->state.packed.last_avail_idx = state->packed.last_avail_idx;
+ vq->state.packed.last_used_counter =
+ state->packed.last_used_counter;
+ vq->state.packed.last_used_idx = state->packed.last_used_idx;
+ } else
+ vq->state.split.avail_index = state->split.avail_index;
+
+ return 0;
+}
+
+static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_vq_state *state)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
+ return vduse_dev_get_vq_state_packed(dev, vq, &state->packed);
+
+ return vduse_dev_get_vq_state_split(dev, vq, &state->split);
+}
+
+static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->vq_align;
+}
+
+static u64 vduse_vdpa_get_features(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->device_features;
+}
+
+static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ dev->driver_features = features;
+ return 0;
+}
+
+static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa,
+ struct vdpa_callback *cb)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ spin_lock(&dev->irq_lock);
+ dev->config_cb.callback = cb->callback;
+ dev->config_cb.private = cb->private;
+ spin_unlock(&dev->irq_lock);
+}
+
+static u16 vduse_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ u16 num_max = 0;
+ int i;
+
+ for (i = 0; i < dev->vq_num; i++)
+ if (num_max < dev->vqs[i].num_max)
+ num_max = dev->vqs[i].num_max;
+
+ return num_max;
+}
+
+static u32 vduse_vdpa_get_device_id(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->device_id;
+}
+
+static u32 vduse_vdpa_get_vendor_id(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->vendor_id;
+}
+
+static u8 vduse_vdpa_get_status(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->status;
+}
+
+static void vduse_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ if (vduse_dev_set_status(dev, status))
+ return;
+
+ dev->status = status;
+}
+
+static size_t vduse_vdpa_get_config_size(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->config_size;
+}
+
+static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ if (len > dev->config_size - offset)
+ return;
+
+ memcpy(buf, dev->config + offset, len);
+}
+
+static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ /* Now we only support read-only configuration space */
+}
+
+static int vduse_vdpa_reset(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ if (vduse_dev_set_status(dev, 0))
+ return -EIO;
+
+ vduse_dev_reset(dev);
+
+ return 0;
+}
+
+static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->generation;
+}
+
+static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
+ struct vhost_iotlb *iotlb)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ int ret;
+
+ ret = vduse_domain_set_map(dev->domain, iotlb);
+ if (ret)
+ return ret;
+
+ ret = vduse_dev_update_iotlb(dev, 0ULL, ULLONG_MAX);
+ if (ret) {
+ vduse_domain_clear_map(dev->domain, iotlb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vduse_vdpa_free(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ dev->vdev = NULL;
+}
+
+static const struct vdpa_config_ops vduse_vdpa_config_ops = {
+ .set_vq_address = vduse_vdpa_set_vq_address,
+ .kick_vq = vduse_vdpa_kick_vq,
+ .set_vq_cb = vduse_vdpa_set_vq_cb,
+ .set_vq_num = vduse_vdpa_set_vq_num,
+ .set_vq_ready = vduse_vdpa_set_vq_ready,
+ .get_vq_ready = vduse_vdpa_get_vq_ready,
+ .set_vq_state = vduse_vdpa_set_vq_state,
+ .get_vq_state = vduse_vdpa_get_vq_state,
+ .get_vq_align = vduse_vdpa_get_vq_align,
+ .get_features = vduse_vdpa_get_features,
+ .set_features = vduse_vdpa_set_features,
+ .set_config_cb = vduse_vdpa_set_config_cb,
+ .get_vq_num_max = vduse_vdpa_get_vq_num_max,
+ .get_device_id = vduse_vdpa_get_device_id,
+ .get_vendor_id = vduse_vdpa_get_vendor_id,
+ .get_status = vduse_vdpa_get_status,
+ .set_status = vduse_vdpa_set_status,
+ .get_config_size = vduse_vdpa_get_config_size,
+ .get_config = vduse_vdpa_get_config,
+ .set_config = vduse_vdpa_set_config,
+ .get_generation = vduse_vdpa_get_generation,
+ .reset = vduse_vdpa_reset,
+ .set_map = vduse_vdpa_set_map,
+ .free = vduse_vdpa_free,
+};
+
+static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
+}
+
+static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
+}
+
+static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+ unsigned long iova;
+ void *addr;
+
+ *dma_addr = DMA_MAPPING_ERROR;
+ addr = vduse_domain_alloc_coherent(domain, size,
+ (dma_addr_t *)&iova, flag, attrs);
+ if (!addr)
+ return NULL;
+
+ *dma_addr = (dma_addr_t)iova;
+
+ return addr;
+}
+
+static void vduse_dev_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
+}
+
+static size_t vduse_dev_max_mapping_size(struct device *dev)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ return domain->bounce_size;
+}
+
+static const struct dma_map_ops vduse_dev_dma_ops = {
+ .map_page = vduse_dev_map_page,
+ .unmap_page = vduse_dev_unmap_page,
+ .alloc = vduse_dev_alloc_coherent,
+ .free = vduse_dev_free_coherent,
+ .max_mapping_size = vduse_dev_max_mapping_size,
+};
+
+static unsigned int perm_to_file_flags(u8 perm)
+{
+ unsigned int flags = 0;
+
+ switch (perm) {
+ case VDUSE_ACCESS_WO:
+ flags |= O_WRONLY;
+ break;
+ case VDUSE_ACCESS_RO:
+ flags |= O_RDONLY;
+ break;
+ case VDUSE_ACCESS_RW:
+ flags |= O_RDWR;
+ break;
+ default:
+ WARN(1, "invalidate vhost IOTLB permission\n");
+ break;
+ }
+
+ return flags;
+}
+
+static int vduse_kickfd_setup(struct vduse_dev *dev,
+ struct vduse_vq_eventfd *eventfd)
+{
+ struct eventfd_ctx *ctx = NULL;
+ struct vduse_virtqueue *vq;
+ u32 index;
+
+ if (eventfd->index >= dev->vq_num)
+ return -EINVAL;
+
+ index = array_index_nospec(eventfd->index, dev->vq_num);
+ vq = &dev->vqs[index];
+ if (eventfd->fd >= 0) {
+ ctx = eventfd_ctx_fdget(eventfd->fd);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ } else if (eventfd->fd != VDUSE_EVENTFD_DEASSIGN)
+ return 0;
+
+ spin_lock(&vq->kick_lock);
+ if (vq->kickfd)
+ eventfd_ctx_put(vq->kickfd);
+ vq->kickfd = ctx;
+ if (vq->ready && vq->kicked && vq->kickfd) {
+ eventfd_signal(vq->kickfd, 1);
+ vq->kicked = false;
+ }
+ spin_unlock(&vq->kick_lock);
+
+ return 0;
+}
+
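A hedged userspace sketch of attaching a kick eventfd to virtqueue 0 through the VDUSE_VQ_SETUP_KICKFD ioctl handled below (headers and field names from the uapi added by this series):

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

static int setup_kickfd_sketch(int dev_fd)
{
	struct vduse_vq_eventfd efd = {
		.index = 0,
		.fd = eventfd(0, EFD_NONBLOCK),
	};

	/* A kick that arrived before this is replayed via vq->kicked once
	 * the eventfd is in place (see the function above); passing
	 * .fd = VDUSE_EVENTFD_DEASSIGN later detaches the eventfd again. */
	return ioctl(dev_fd, VDUSE_VQ_SETUP_KICKFD, &efd);
}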
+static bool vduse_dev_is_ready(struct vduse_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->vq_num; i++)
+ if (!dev->vqs[i].num_max)
+ return false;
+
+ return true;
+}
+
+static void vduse_dev_irq_inject(struct work_struct *work)
+{
+ struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
+
+ spin_lock_irq(&dev->irq_lock);
+ if (dev->config_cb.callback)
+ dev->config_cb.callback(dev->config_cb.private);
+ spin_unlock_irq(&dev->irq_lock);
+}
+
+static void vduse_vq_irq_inject(struct work_struct *work)
+{
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, inject);
+
+ spin_lock_irq(&vq->irq_lock);
+ if (vq->ready && vq->cb.callback)
+ vq->cb.callback(vq->cb.private);
+ spin_unlock_irq(&vq->irq_lock);
+}
+
+static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct vduse_dev *dev = file->private_data;
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+ if (unlikely(dev->broken))
+ return -EPERM;
+
+ switch (cmd) {
+ case VDUSE_IOTLB_GET_FD: {
+ struct vduse_iotlb_entry entry;
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+ struct vduse_iova_domain *domain = dev->domain;
+ struct file *f = NULL;
+
+ ret = -EFAULT;
+ if (copy_from_user(&entry, argp, sizeof(entry)))
+ break;
+
+ ret = -EINVAL;
+ if (entry.start > entry.last)
+ break;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb,
+ entry.start, entry.last);
+ if (map) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ f = get_file(map_file->file);
+ entry.offset = map_file->offset;
+ entry.start = map->start;
+ entry.last = map->last;
+ entry.perm = map->perm;
+ }
+ spin_unlock(&domain->iotlb_lock);
+ ret = -EINVAL;
+ if (!f)
+ break;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &entry, sizeof(entry))) {
+ fput(f);
+ break;
+ }
+ ret = receive_fd(f, perm_to_file_flags(entry.perm));
+ fput(f);
+ break;
+ }
+ case VDUSE_DEV_GET_FEATURES:
+ /*
+ * Just mirror what the driver wrote here.
+ * The driver is expected to check FEATURES_OK later.
+ */
+ ret = put_user(dev->driver_features, (u64 __user *)argp);
+ break;
+ case VDUSE_DEV_SET_CONFIG: {
+ struct vduse_config_data config;
+ unsigned long size = offsetof(struct vduse_config_data,
+ buffer);
+
+ ret = -EFAULT;
+ if (copy_from_user(&config, argp, size))
+ break;
+
+ ret = -EINVAL;
+ if (config.length == 0 ||
+ config.length > dev->config_size - config.offset)
+ break;
+
+ ret = -EFAULT;
+ if (copy_from_user(dev->config + config.offset, argp + size,
+ config.length))
+ break;
+
+ ret = 0;
+ break;
+ }
+ case VDUSE_DEV_INJECT_CONFIG_IRQ:
+ ret = 0;
+ queue_work(vduse_irq_wq, &dev->inject);
+ break;
+ case VDUSE_VQ_SETUP: {
+ struct vduse_vq_config config;
+ u32 index;
+
+ ret = -EFAULT;
+ if (copy_from_user(&config, argp, sizeof(config)))
+ break;
+
+ ret = -EINVAL;
+ if (config.index >= dev->vq_num)
+ break;
+
+ if (!is_mem_zero((const char *)config.reserved,
+ sizeof(config.reserved)))
+ break;
+
+ index = array_index_nospec(config.index, dev->vq_num);
+ dev->vqs[index].num_max = config.max_size;
+ ret = 0;
+ break;
+ }
+ case VDUSE_VQ_GET_INFO: {
+ struct vduse_vq_info vq_info;
+ struct vduse_virtqueue *vq;
+ u32 index;
+
+ ret = -EFAULT;
+ if (copy_from_user(&vq_info, argp, sizeof(vq_info)))
+ break;
+
+ ret = -EINVAL;
+ if (vq_info.index >= dev->vq_num)
+ break;
+
+ index = array_index_nospec(vq_info.index, dev->vq_num);
+ vq = &dev->vqs[index];
+ vq_info.desc_addr = vq->desc_addr;
+ vq_info.driver_addr = vq->driver_addr;
+ vq_info.device_addr = vq->device_addr;
+ vq_info.num = vq->num;
+
+ if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
+ vq_info.packed.last_avail_counter =
+ vq->state.packed.last_avail_counter;
+ vq_info.packed.last_avail_idx =
+ vq->state.packed.last_avail_idx;
+ vq_info.packed.last_used_counter =
+ vq->state.packed.last_used_counter;
+ vq_info.packed.last_used_idx =
+ vq->state.packed.last_used_idx;
+ } else
+ vq_info.split.avail_index =
+ vq->state.split.avail_index;
+
+ vq_info.ready = vq->ready;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &vq_info, sizeof(vq_info)))
+ break;
+
+ ret = 0;
+ break;
+ }
+ case VDUSE_VQ_SETUP_KICKFD: {
+ struct vduse_vq_eventfd eventfd;
+
+ ret = -EFAULT;
+ if (copy_from_user(&eventfd, argp, sizeof(eventfd)))
+ break;
+
+ ret = vduse_kickfd_setup(dev, &eventfd);
+ break;
+ }
+ case VDUSE_VQ_INJECT_IRQ: {
+ u32 index;
+
+ ret = -EFAULT;
+ if (get_user(index, (u32 __user *)argp))
+ break;
+
+ ret = -EINVAL;
+ if (index >= dev->vq_num)
+ break;
+
+ ret = 0;
+ index = array_index_nospec(index, dev->vq_num);
+ queue_work(vduse_irq_wq, &dev->vqs[index].inject);
+ break;
+ }
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
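
These commands are the kernel half of a simple userspace loop: the daemon sizes each queue with VDUSE_VQ_SETUP, services kicks, and signals used buffers back with VDUSE_VQ_INJECT_IRQ, which queues the per-vq inject work above. A hedged sketch of both calls (helper names are illustrative, assuming <linux/vduse.h> from this series):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

/* Declare the maximum queue size; every queue must be set up like this
 * before the device passes vduse_dev_is_ready() and can be attached to
 * the vdpa bus. The designated initializer keeps 'reserved' zeroed, as
 * required by the is_mem_zero() check above. */
static int vduse_setup_vq(int dev_fd, uint32_t index, uint16_t max_size)
{
        struct vduse_vq_config cfg = {
                .index = index,
                .max_size = max_size,
        };

        return ioctl(dev_fd, VDUSE_VQ_SETUP, &cfg);
}

/* After publishing used descriptors for virtqueue 'index' in the shared
 * ring, ask the kernel to run the virtqueue's interrupt callback. */
static int vduse_inject_vq_irq(int dev_fd, uint32_t index)
{
        return ioctl(dev_fd, VDUSE_VQ_INJECT_IRQ, &index);
}
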
+
+static int vduse_dev_release(struct inode *inode, struct file *file)
+{
+ struct vduse_dev *dev = file->private_data;
+
+ spin_lock(&dev->msg_lock);
+ /* Make sure the inflight messages can be processed after reconnection */
+ list_splice_init(&dev->recv_list, &dev->send_list);
+ spin_unlock(&dev->msg_lock);
+ dev->connected = false;
+
+ return 0;
+}
+
+static struct vduse_dev *vduse_dev_get_from_minor(int minor)
+{
+ struct vduse_dev *dev;
+
+ mutex_lock(&vduse_lock);
+ dev = idr_find(&vduse_idr, minor);
+ mutex_unlock(&vduse_lock);
+
+ return dev;
+}
+
+static int vduse_dev_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct vduse_dev *dev = vduse_dev_get_from_minor(iminor(inode));
+
+ if (!dev)
+ return -ENODEV;
+
+ ret = -EBUSY;
+ mutex_lock(&dev->lock);
+ if (dev->connected)
+ goto unlock;
+
+ ret = 0;
+ dev->connected = true;
+ file->private_data = dev;
+unlock:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static const struct file_operations vduse_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = vduse_dev_open,
+ .release = vduse_dev_release,
+ .read_iter = vduse_dev_read_iter,
+ .write_iter = vduse_dev_write_iter,
+ .poll = vduse_dev_poll,
+ .unlocked_ioctl = vduse_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct vduse_dev *vduse_dev_create(void)
+{
+ struct vduse_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (!dev)
+ return NULL;
+
+ mutex_init(&dev->lock);
+ spin_lock_init(&dev->msg_lock);
+ INIT_LIST_HEAD(&dev->send_list);
+ INIT_LIST_HEAD(&dev->recv_list);
+ spin_lock_init(&dev->irq_lock);
+
+ INIT_WORK(&dev->inject, vduse_dev_irq_inject);
+ init_waitqueue_head(&dev->waitq);
+
+ return dev;
+}
+
+static void vduse_dev_destroy(struct vduse_dev *dev)
+{
+ kfree(dev);
+}
+
+static struct vduse_dev *vduse_find_dev(const char *name)
+{
+ struct vduse_dev *dev;
+ int id;
+
+ idr_for_each_entry(&vduse_idr, dev, id)
+ if (!strcmp(dev->name, name))
+ return dev;
+
+ return NULL;
+}
+
+static int vduse_destroy_dev(char *name)
+{
+ struct vduse_dev *dev = vduse_find_dev(name);
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&dev->lock);
+ if (dev->vdev || dev->connected) {
+ mutex_unlock(&dev->lock);
+ return -EBUSY;
+ }
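+ /* Prevent a new open() from connecting while the device is torn down */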
+ dev->connected = true;
+ mutex_unlock(&dev->lock);
+
+ vduse_dev_reset(dev);
+ device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
+ idr_remove(&vduse_idr, dev->minor);
+ kvfree(dev->config);
+ kfree(dev->vqs);
+ vduse_domain_destroy(dev->domain);
+ kfree(dev->name);
+ vduse_dev_destroy(dev);
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static bool device_is_allowed(u32 device_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allowed_device_id); i++)
+ if (allowed_device_id[i] == device_id)
+ return true;
+
+ return false;
+}
+
+static bool features_is_valid(u64 features)
+{
+ if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
+ return false;
+
+ /* For now we only support a read-only configuration space */
+ if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE))
+ return false;
+
+ return true;
+}
+
+static bool vduse_validate_config(struct vduse_dev_config *config)
+{
+ if (!is_mem_zero((const char *)config->reserved,
+ sizeof(config->reserved)))
+ return false;
+
+ if (config->vq_align > PAGE_SIZE)
+ return false;
+
+ if (config->config_size > PAGE_SIZE)
+ return false;
+
+ if (!device_is_allowed(config->device_id))
+ return false;
+
+ if (!features_is_valid(config->features))
+ return false;
+
+ return true;
+}
+
+static ssize_t msg_timeout_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct vduse_dev *dev = dev_get_drvdata(device);
+
+ return sysfs_emit(buf, "%u\n", dev->msg_timeout);
+}
+
+static ssize_t msg_timeout_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct vduse_dev *dev = dev_get_drvdata(device);
+ int ret;
+
+ ret = kstrtouint(buf, 10, &dev->msg_timeout);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(msg_timeout);
+
+static struct attribute *vduse_dev_attrs[] = {
+ &dev_attr_msg_timeout.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(vduse_dev);
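
The msg_timeout attribute bounds how long the driver waits for the userspace daemon to answer an inflight control message before giving up, and it is exported as a plain decimal value under /sys/class/vduse/<name>/. A small illustrative snippet for tuning it from C (the helper name and buffer sizing are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Write a new timeout value to /sys/class/vduse/<name>/msg_timeout. */
static int vduse_set_msg_timeout(const char *name, unsigned int timeout)
{
        char path[288];
        int fd, ret;

        snprintf(path, sizeof(path), "/sys/class/vduse/%s/msg_timeout", name);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        ret = dprintf(fd, "%u", timeout) < 0 ? -1 : 0;
        close(fd);
        return ret;
}
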
+
+static int vduse_create_dev(struct vduse_dev_config *config,
+ void *config_buf, u64 api_version)
+{
+ int i, ret;
+ struct vduse_dev *dev;
+
+ ret = -EEXIST;
+ if (vduse_find_dev(config->name))
+ goto err;
+
+ ret = -ENOMEM;
+ dev = vduse_dev_create();
+ if (!dev)
+ goto err;
+
+ dev->api_version = api_version;
+ dev->device_features = config->features;
+ dev->device_id = config->device_id;
+ dev->vendor_id = config->vendor_id;
+ dev->name = kstrdup(config->name, GFP_KERNEL);
+ if (!dev->name)
+ goto err_str;
+
+ dev->domain = vduse_domain_create(VDUSE_IOVA_SIZE - 1,
+ VDUSE_BOUNCE_SIZE);
+ if (!dev->domain)
+ goto err_domain;
+
+ dev->config = config_buf;
+ dev->config_size = config->config_size;
+ dev->vq_align = config->vq_align;
+ dev->vq_num = config->vq_num;
+ dev->vqs = kcalloc(dev->vq_num, sizeof(*dev->vqs), GFP_KERNEL);
+ if (!dev->vqs)
+ goto err_vqs;
+
+ for (i = 0; i < dev->vq_num; i++) {
+ dev->vqs[i].index = i;
+ INIT_WORK(&dev->vqs[i].inject, vduse_vq_irq_inject);
+ INIT_WORK(&dev->vqs[i].kick, vduse_vq_kick_work);
+ spin_lock_init(&dev->vqs[i].kick_lock);
+ spin_lock_init(&dev->vqs[i].irq_lock);
+ }
+
+ ret = idr_alloc(&vduse_idr, dev, 1, VDUSE_DEV_MAX, GFP_KERNEL);
+ if (ret < 0)
+ goto err_idr;
+
+ dev->minor = ret;
+ dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
+ dev->dev = device_create(vduse_class, NULL,
+ MKDEV(MAJOR(vduse_major), dev->minor),
+ dev, "%s", config->name);
+ if (IS_ERR(dev->dev)) {
+ ret = PTR_ERR(dev->dev);
+ goto err_dev;
+ }
+ __module_get(THIS_MODULE);
+
+ return 0;
+err_dev:
+ idr_remove(&vduse_idr, dev->minor);
+err_idr:
+ kfree(dev->vqs);
+err_vqs:
+ vduse_domain_destroy(dev->domain);
+err_domain:
+ kfree(dev->name);
+err_str:
+ vduse_dev_destroy(dev);
+err:
+ kvfree(config_buf);
+ return ret;
+}
+
+static long vduse_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ void __user *argp = (void __user *)arg;
+ struct vduse_control *control = file->private_data;
+
+ mutex_lock(&vduse_lock);
+ switch (cmd) {
+ case VDUSE_GET_API_VERSION:
+ ret = put_user(control->api_version, (u64 __user *)argp);
+ break;
+ case VDUSE_SET_API_VERSION: {
+ u64 api_version;
+
+ ret = -EFAULT;
+ if (get_user(api_version, (u64 __user *)argp))
+ break;
+
+ ret = -EINVAL;
+ if (api_version > VDUSE_API_VERSION)
+ break;
+
+ ret = 0;
+ control->api_version = api_version;
+ break;
+ }
+ case VDUSE_CREATE_DEV: {
+ struct vduse_dev_config config;
+ unsigned long size = offsetof(struct vduse_dev_config, config);
+ void *buf;
+
+ ret = -EFAULT;
+ if (copy_from_user(&config, argp, size))
+ break;
+
+ ret = -EINVAL;
+ if (!vduse_validate_config(&config))
+ break;
+
+ buf = vmemdup_user(argp + size, config.config_size);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ break;
+ }
+ config.name[VDUSE_NAME_MAX - 1] = '\0';
+ ret = vduse_create_dev(&config, buf, control->api_version);
+ break;
+ }
+ case VDUSE_DESTROY_DEV: {
+ char name[VDUSE_NAME_MAX];
+
+ ret = -EFAULT;
+ if (copy_from_user(name, argp, VDUSE_NAME_MAX))
+ break;
+
+ name[VDUSE_NAME_MAX - 1] = '\0';
+ ret = vduse_destroy_dev(name);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&vduse_lock);
+
+ return ret;
+}
+
+static int vduse_release(struct inode *inode, struct file *file)
+{
+ struct vduse_control *control = file->private_data;
+
+ kfree(control);
+ return 0;
+}
+
+static int vduse_open(struct inode *inode, struct file *file)
+{
+ struct vduse_control *control;
+
+ control = kmalloc(sizeof(*control), GFP_KERNEL);
+ if (!control)
+ return -ENOMEM;
+
+ control->api_version = VDUSE_API_VERSION;
+ file->private_data = control;
+
+ return 0;
+}
+
+static const struct file_operations vduse_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = vduse_open,
+ .release = vduse_release,
+ .unlocked_ioctl = vduse_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
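
Putting the control node to use: a daemon opens /dev/vduse/control, pins the API version, and issues VDUSE_CREATE_DEV with a vduse_dev_config followed by the initial config space. A minimal sketch for a virtio-blk device within the limits enforced by vduse_validate_config() above (the helper name, capacity, queue count, and alignment are illustrative; config-space endianness handling is omitted):

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vduse.h>
#include <linux/virtio_blk.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>

static int vduse_create_blk(const char *name)
{
        struct virtio_blk_config blk_cfg = { .capacity = 2048 /* 512B sectors */ };
        struct vduse_dev_config *cfg;
        uint64_t api = VDUSE_API_VERSION;
        int ctrl_fd, ret = -1;

        cfg = calloc(1, sizeof(*cfg) + sizeof(blk_cfg));
        if (!cfg)
                return -1;

        strncpy(cfg->name, name, sizeof(cfg->name) - 1);
        cfg->device_id = VIRTIO_ID_BLOCK; /* only block passes device_is_allowed() */
        cfg->features = 1ULL << VIRTIO_F_ACCESS_PLATFORM; /* see features_is_valid() */
        cfg->vq_num = 1;
        cfg->vq_align = 4096;
        cfg->config_size = sizeof(blk_cfg);
        memcpy(cfg->config, &blk_cfg, sizeof(blk_cfg));

        ctrl_fd = open("/dev/vduse/control", O_RDWR);
        if (ctrl_fd < 0)
                goto out;
        if (!ioctl(ctrl_fd, VDUSE_SET_API_VERSION, &api) &&
            !ioctl(ctrl_fd, VDUSE_CREATE_DEV, cfg))
                ret = 0;
        close(ctrl_fd);
out:
        free(cfg);
        return ret;
}

The new node then appears as /dev/vduse/<name> (see vduse_devnode() below); once every queue has been sized via VDUSE_VQ_SETUP, the device can be bound to the vdpa bus through the management interface registered later in this file.
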
+
+static char *vduse_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
+}
+
+static void vduse_mgmtdev_release(struct device *dev)
+{
+}
+
+static struct device vduse_mgmtdev = {
+ .init_name = "vduse",
+ .release = vduse_mgmtdev_release,
+};
+
+static struct vdpa_mgmt_dev mgmt_dev;
+
+static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
+{
+ struct vduse_vdpa *vdev;
+ int ret;
+
+ if (dev->vdev)
+ return -EEXIST;
+
+ vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
+ &vduse_vdpa_config_ops, name, true);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+
+ dev->vdev = vdev;
+ vdev->dev = dev;
+ vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64));
+ if (ret) {
+ put_device(&vdev->vdpa.dev);
+ return ret;
+ }
+ set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
+ vdev->vdpa.dma_dev = &vdev->vdpa.dev;
+ vdev->vdpa.mdev = &mgmt_dev;
+
+ return 0;
+}
+
+static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+{
+ struct vduse_dev *dev;
+ int ret;
+
+ mutex_lock(&vduse_lock);
+ dev = vduse_find_dev(name);
+ if (!dev || !vduse_dev_is_ready(dev)) {
+ mutex_unlock(&vduse_lock);
+ return -EINVAL;
+ }
+ ret = vduse_dev_init_vdpa(dev, name);
+ mutex_unlock(&vduse_lock);
+ if (ret)
+ return ret;
+
+ ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
+ if (ret) {
+ put_device(&dev->vdev->vdpa.dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
+{
+ _vdpa_unregister_device(dev);
+}
+
+static const struct vdpa_mgmtdev_ops vdpa_dev_mgmtdev_ops = {
+ .dev_add = vdpa_dev_add,
+ .dev_del = vdpa_dev_del,
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct vdpa_mgmt_dev mgmt_dev = {
+ .device = &vduse_mgmtdev,
+ .id_table = id_table,
+ .ops = &vdpa_dev_mgmtdev_ops,
+};
+
+static int vduse_mgmtdev_init(void)
+{
+ int ret;
+
+ ret = device_register(&vduse_mgmtdev);
+ if (ret)
+ return ret;
+
+ ret = vdpa_mgmtdev_register(&mgmt_dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ device_unregister(&vduse_mgmtdev);
+ return ret;
+}
+
+static void vduse_mgmtdev_exit(void)
+{
+ vdpa_mgmtdev_unregister(&mgmt_dev);
+ device_unregister(&vduse_mgmtdev);
+}
+
+static int vduse_init(void)
+{
+ int ret;
+ struct device *dev;
+
+ vduse_class = class_create(THIS_MODULE, "vduse");
+ if (IS_ERR(vduse_class))
+ return PTR_ERR(vduse_class);
+
+ vduse_class->devnode = vduse_devnode;
+ vduse_class->dev_groups = vduse_dev_groups;
+
+ ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
+ if (ret)
+ goto err_chardev_region;
+
+ /* /dev/vduse/control */
+ cdev_init(&vduse_ctrl_cdev, &vduse_ctrl_fops);
+ vduse_ctrl_cdev.owner = THIS_MODULE;
+ ret = cdev_add(&vduse_ctrl_cdev, vduse_major, 1);
+ if (ret)
+ goto err_ctrl_cdev;
+
+ dev = device_create(vduse_class, NULL, vduse_major, NULL, "control");
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_device;
+ }
+
+ /* /dev/vduse/$DEVICE */
+ cdev_init(&vduse_cdev, &vduse_dev_fops);
+ vduse_cdev.owner = THIS_MODULE;
+ ret = cdev_add(&vduse_cdev, MKDEV(MAJOR(vduse_major), 1),
+ VDUSE_DEV_MAX - 1);
+ if (ret)
+ goto err_cdev;
+
+ vduse_irq_wq = alloc_workqueue("vduse-irq",
+ WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
+ if (!vduse_irq_wq) {
+ ret = -ENOMEM;
+ goto err_wq;
+ }
+
+ ret = vduse_domain_init();
+ if (ret)
+ goto err_domain;
+
+ ret = vduse_mgmtdev_init();
+ if (ret)
+ goto err_mgmtdev;
+
+ return 0;
+err_mgmtdev:
+ vduse_domain_exit();
+err_domain:
+ destroy_workqueue(vduse_irq_wq);
+err_wq:
+ cdev_del(&vduse_cdev);
+err_cdev:
+ device_destroy(vduse_class, vduse_major);
+err_device:
+ cdev_del(&vduse_ctrl_cdev);
+err_ctrl_cdev:
+ unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
+err_chardev_region:
+ class_destroy(vduse_class);
+ return ret;
+}
+module_init(vduse_init);
+
+static void vduse_exit(void)
+{
+ vduse_mgmtdev_exit();
+ vduse_domain_exit();
+ destroy_workqueue(vduse_irq_wq);
+ cdev_del(&vduse_cdev);
+ device_destroy(vduse_class, vduse_major);
+ cdev_del(&vduse_ctrl_cdev);
+ unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
+ class_destroy(vduse_class);
+}
+module_exit(vduse_exit);
+
+MODULE_LICENSE(DRV_LICENSE);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
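
Tear-down goes through the same control node and, as vduse_destroy_dev() above shows, fails with -EBUSY while a daemon still has the device open or a vDPA device is still bound. A matching sketch (helper name is illustrative):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vduse.h>

static int vduse_destroy(const char *name)
{
        char buf[VDUSE_NAME_MAX] = { 0 };
        int ctrl_fd, ret;

        ctrl_fd = open("/dev/vduse/control", O_RDWR);
        if (ctrl_fd < 0)
                return -1;
        strncpy(buf, name, sizeof(buf) - 1);
        ret = ioctl(ctrl_fd, VDUSE_DESTROY_DEV, buf);
        close(ctrl_fd);
        return ret;
}
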
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index fe0527329857..5bcd00246d2e 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -189,10 +189,20 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
}
vp_modern_set_status(mdev, status);
+}
- if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
- (s & VIRTIO_CONFIG_S_DRIVER_OK))
+static int vp_vdpa_reset(struct vdpa_device *vdpa)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ u8 s = vp_vdpa_get_status(vdpa);
+
+ vp_modern_set_status(mdev, 0);
+
+ if (s & VIRTIO_CONFIG_S_DRIVER_OK)
vp_vdpa_free_irq(vp_vdpa);
+
+ return 0;
}
static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
@@ -398,6 +408,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
.set_features = vp_vdpa_set_features,
.get_status = vp_vdpa_get_status,
.set_status = vp_vdpa_set_status,
+ .reset = vp_vdpa_reset,
.get_vq_num_max = vp_vdpa_get_vq_num_max,
.get_vq_state = vp_vdpa_get_vq_state,
.get_vq_notification = vp_vdpa_get_vq_notification,
@@ -435,7 +446,7 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
- dev, &vp_vdpa_ops, NULL);
+ dev, &vp_vdpa_ops, NULL, false);
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return PTR_ERR(vp_vdpa);
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 0582079e4bcc..670d56c879e5 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -36,19 +36,21 @@ void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
EXPORT_SYMBOL_GPL(vhost_iotlb_map_free);
/**
- * vhost_iotlb_add_range - add a new range to vhost IOTLB
+ * vhost_iotlb_add_range_ctx - add a new range to vhost IOTLB
* @iotlb: the IOTLB
* @start: start of the IOVA range
* @last: last of IOVA range
* @addr: the address that is mapped to @start
* @perm: access permission of this range
+ * @opaque: the opaque pointer for the new mapping
*
* Returns an error if @last is smaller than @start or if memory allocation
* fails
*/
-int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
- u64 start, u64 last,
- u64 addr, unsigned int perm)
+int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm,
+ void *opaque)
{
struct vhost_iotlb_map *map;
@@ -71,6 +73,7 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
map->last = last;
map->addr = addr;
map->perm = perm;
+ map->opaque = opaque;
iotlb->nmaps++;
vhost_iotlb_itree_insert(map, &iotlb->root);
@@ -80,6 +83,15 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
return 0;
}
+EXPORT_SYMBOL_GPL(vhost_iotlb_add_range_ctx);
+
+int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm)
+{
+ return vhost_iotlb_add_range_ctx(iotlb, start, last,
+ addr, perm, NULL);
+}
EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
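
The _ctx variant lets a caller attach per-mapping context that travels with the vhost_iotlb_map; vhost-vdpa (further down in this merge) uses it to remember which file backs a VA mapping so the range can later be handed to userspace as an fd. A condensed kernel-style sketch of that pattern, not tied to any one driver:

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>

/* Map [iova, iova + size - 1] to uaddr and remember the backing file and
 * offset via the mapping's opaque pointer. */
static int map_with_file_ctx(struct vhost_iotlb *iotlb, u64 iova, u64 size,
                             u64 uaddr, u32 perm,
                             struct file *file, u64 offset)
{
        struct vdpa_map_file *map_file;
        int ret;

        map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
        if (!map_file)
                return -ENOMEM;

        map_file->file = get_file(file);
        map_file->offset = offset;
        ret = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
                                        uaddr, perm, map_file);
        if (ret) {
                fput(map_file->file);
                kfree(map_file);
        }
        return ret;
}
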
/**
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 3a249ee7e144..28ef323882fb 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net,
.num = nvq->batched_xdp,
.ptr = nvq->xdp,
};
- int err;
+ int i, err;
if (nvq->batched_xdp == 0)
goto signal_used;
@@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net,
err = sock->ops->sendmsg(sock, msghdr, 0);
if (unlikely(err < 0)) {
vq_err(&nvq->vq, "Fail to batch sending packets\n");
+
+ /* free pages owned by XDP; since this is an unlikely error path,
+ * keep it simple and avoid a more complex bulk update for the
+ * used pages
+ */
+ for (i = 0; i < nvq->batched_xdp; ++i)
+ put_page(virt_to_head_page(nvq->xdp[i].data));
+ nvq->batched_xdp = 0;
+ nvq->done_idx = 0;
return;
}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 46f897e41217..532e204f2b1b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1,24 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
* Vhost kernel TCM fabric driver for virtio SCSI initiators
*
* (C) Copyright 2010-2013 Datera, Inc.
* (C) Copyright 2010-2012 IBM Corp.
*
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
- *
* Authors: Nicholas A. Bellinger <nab@daterainc.com>
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
****************************************************************************/
#include <linux/module.h>
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 9479f7f79217..f41d081777f5 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -116,12 +116,13 @@ static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
-static void vhost_vdpa_reset(struct vhost_vdpa *v)
+static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
- vdpa_reset(vdpa);
v->in_batch = 0;
+
+ return vdpa_reset(vdpa);
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
@@ -157,7 +158,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u8 status, status_old;
- int nvqs = v->nvqs;
+ int ret, nvqs = v->nvqs;
u16 i;
if (copy_from_user(&status, statusp, sizeof(status)))
@@ -172,7 +173,12 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
return -EINVAL;
- ops->set_status(vdpa, status);
+ if (status == 0) {
+ ret = ops->reset(vdpa);
+ if (ret)
+ return ret;
+ } else
+ ops->set_status(vdpa, status);
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
for (i = 0; i < nvqs; i++)
@@ -498,7 +504,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return r;
}
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
@@ -507,19 +513,44 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
unsigned long pfn, pinned;
while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
- pinned = map->size >> PAGE_SHIFT;
- for (pfn = map->addr >> PAGE_SHIFT;
+ pinned = PFN_DOWN(map->size);
+ for (pfn = PFN_DOWN(map->addr);
pinned > 0; pfn++, pinned--) {
page = pfn_to_page(pfn);
if (map->perm & VHOST_ACCESS_WO)
set_page_dirty_lock(page);
unpin_user_page(page);
}
- atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
vhost_iotlb_map_free(iotlb, map);
}
}
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+
+ while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+}
+
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_unmap(v, start, last);
+
+ return vhost_vdpa_pa_unmap(v, start, last);
+}
+
static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
struct vhost_dev *dev = &v->vdev;
@@ -551,21 +582,21 @@ static int perm_to_iommu_flags(u32 perm)
return flags | IOMMU_CACHE;
}
-static int vhost_vdpa_map(struct vhost_vdpa *v,
- u64 iova, u64 size, u64 pa, u32 perm)
+static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
+ u64 size, u64 pa, u32 perm, void *opaque)
{
struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;
- r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
- pa, perm);
+ r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
+ pa, perm, opaque);
if (r)
return r;
if (ops->dma_map) {
- r = ops->dma_map(vdpa, iova, size, pa, perm);
+ r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
} else if (ops->set_map) {
if (!v->in_batch)
r = ops->set_map(vdpa, dev->iotlb);
@@ -573,13 +604,15 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm));
}
-
- if (r)
+ if (r) {
vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
- else
- atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ return r;
+ }
- return r;
+ if (!vdpa->use_va)
+ atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
+
+ return 0;
}
static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
@@ -600,38 +633,78 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
}
}
-static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
- struct vhost_iotlb_msg *msg)
+static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
+{
+ struct vhost_dev *dev = &v->vdev;
+ u64 offset, map_size, map_iova = iova;
+ struct vdpa_map_file *map_file;
+ struct vm_area_struct *vma;
+ int ret = 0;
+
+ mmap_read_lock(dev->mm);
+
+ while (size) {
+ vma = find_vma(dev->mm, uaddr);
+ if (!vma) {
+ ret = -EINVAL;
+ break;
+ }
+ map_size = min(size, vma->vm_end - uaddr);
+ if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
+ !(vma->vm_flags & (VM_IO | VM_PFNMAP))))
+ goto next;
+
+ map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
+ if (!map_file) {
+ ret = -ENOMEM;
+ break;
+ }
+ offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
+ map_file->offset = offset;
+ map_file->file = get_file(vma->vm_file);
+ ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
+ perm, map_file);
+ if (ret) {
+ fput(map_file->file);
+ kfree(map_file);
+ break;
+ }
+next:
+ size -= map_size;
+ uaddr += map_size;
+ map_iova += map_size;
+ }
+ if (ret)
+ vhost_vdpa_unmap(v, iova, map_iova - iova);
+
+ mmap_read_unlock(dev->mm);
+
+ return ret;
+}
+
+static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
{
struct vhost_dev *dev = &v->vdev;
- struct vhost_iotlb *iotlb = dev->iotlb;
struct page **page_list;
unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
unsigned int gup_flags = FOLL_LONGTERM;
unsigned long npages, cur_base, map_pfn, last_pfn = 0;
unsigned long lock_limit, sz2pin, nchunks, i;
- u64 iova = msg->iova;
+ u64 start = iova;
long pinned;
int ret = 0;
- if (msg->iova < v->range.first || !msg->size ||
- msg->iova > U64_MAX - msg->size + 1 ||
- msg->iova + msg->size - 1 > v->range.last)
- return -EINVAL;
-
- if (vhost_iotlb_itree_first(iotlb, msg->iova,
- msg->iova + msg->size - 1))
- return -EEXIST;
-
/* Limit the use of memory for bookkeeping */
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list)
return -ENOMEM;
- if (msg->perm & VHOST_ACCESS_WO)
+ if (perm & VHOST_ACCESS_WO)
gup_flags |= FOLL_WRITE;
- npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+ npages = PFN_UP(size + (iova & ~PAGE_MASK));
if (!npages) {
ret = -EINVAL;
goto free;
@@ -639,13 +712,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
mmap_read_lock(dev->mm);
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
ret = -ENOMEM;
goto unlock;
}
- cur_base = msg->uaddr & PAGE_MASK;
+ cur_base = uaddr & PAGE_MASK;
iova &= PAGE_MASK;
nchunks = 0;
@@ -673,10 +746,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (last_pfn && (this_pfn != last_pfn + 1)) {
/* Pin a contiguous chunk of memory */
- csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+ csize = PFN_PHYS(last_pfn - map_pfn + 1);
ret = vhost_vdpa_map(v, iova, csize,
- map_pfn << PAGE_SHIFT,
- msg->perm);
+ PFN_PHYS(map_pfn),
+ perm, NULL);
if (ret) {
/*
* Unpin the pages that are left unmapped
@@ -699,13 +772,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
last_pfn = this_pfn;
}
- cur_base += pinned << PAGE_SHIFT;
+ cur_base += PFN_PHYS(pinned);
npages -= pinned;
}
/* Pin the rest chunk */
- ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
- map_pfn << PAGE_SHIFT, msg->perm);
+ ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+ PFN_PHYS(map_pfn), perm, NULL);
out:
if (ret) {
if (nchunks) {
@@ -724,13 +797,38 @@ out:
for (pfn = map_pfn; pfn <= last_pfn; pfn++)
unpin_user_page(pfn_to_page(pfn));
}
- vhost_vdpa_unmap(v, msg->iova, msg->size);
+ vhost_vdpa_unmap(v, start, size);
}
unlock:
mmap_read_unlock(dev->mm);
free:
free_page((unsigned long)page_list);
return ret;
+}
+
+static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vdpa_device *vdpa = v->vdpa;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+
+ if (msg->iova < v->range.first || !msg->size ||
+ msg->iova > U64_MAX - msg->size + 1 ||
+ msg->iova + msg->size - 1 > v->range.last)
+ return -EINVAL;
+
+ if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ msg->iova + msg->size - 1))
+ return -EEXIST;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_map(v, msg->iova, msg->size,
+ msg->uaddr, msg->perm);
+
+ return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+ msg->perm);
}
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
@@ -860,7 +958,9 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
return -EBUSY;
nvqs = v->nvqs;
- vhost_vdpa_reset(v);
+ r = vhost_vdpa_reset(v);
+ if (r)
+ goto err;
vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
@@ -945,7 +1045,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
- notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+ PFN_DOWN(notify.addr), PAGE_SIZE,
vma->vm_page_prot))
return VM_FAULT_SIGBUS;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index f249622ef11b..938aefbc75ec 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -114,7 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
size_t nbytes;
size_t iov_len, payload_len;
int head;
- bool restore_flag = false;
+ u32 flags_to_restore = 0;
spin_lock_bh(&vsock->send_pkt_list_lock);
if (list_empty(&vsock->send_pkt_list)) {
@@ -178,16 +178,21 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* small rx buffers, headers of packets in rx queue are
* created dynamically and are initialized with header
* of current packet(except length). But in case of
- * SOCK_SEQPACKET, we also must clear record delimeter
- * bit(VIRTIO_VSOCK_SEQ_EOR). Otherwise, instead of one
- * packet with delimeter(which marks end of record),
- * there will be sequence of packets with delimeter
- * bit set. After initialized header will be copied to
- * rx buffer, this bit will be restored.
+ * SOCK_SEQPACKET, we also must clear the message delimiter
+ * bit (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
+ * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise,
+ * there will be a sequence of packets with these
+ * bits set. After the initialized header has been copied
+ * to the rx buffer, these bits will be restored.
*/
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
- pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
- restore_flag = true;
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+ }
}
}
@@ -224,8 +229,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* to send it with the next available buffer.
*/
if (pkt->off < pkt->len) {
- if (restore_flag)
- pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
/* We are queueing the same virtio_vsock_pkt to handle
* the remaining bytes, and we want to deliver it
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 71fb710f1ce3..7420d2c16e47 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -962,6 +962,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
struct fb_var_screeninfo old_var;
struct fb_videomode mode;
struct fb_event event;
+ u32 unused;
if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;
@@ -1008,6 +1009,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
if (var->xres < 8 || var->yres < 8)
return -EINVAL;
+ /* An overly large resolution causes a multiplication overflow. */
+ if (check_mul_overflow(var->xres, var->yres, &unused) ||
+ check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused))
+ return -EINVAL;
+
ret = info->fbops->fb_check_var(var, info);
if (ret)
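
check_mul_overflow() from <linux/overflow.h> performs the multiplication with overflow checking and returns true when the product does not fit the destination type, so the guard above rejects any mode whose pixel count would wrap a u32 before fb_check_var() gets to multiply it. A standalone illustration (the function name is made up):

#include <linux/overflow.h>
#include <linux/types.h>

static bool resolution_fits_u32(u32 xres, u32 yres)
{
        u32 pixels;

        /* e.g. 70000 * 70000 is ~4.9e9 and exceeds U32_MAX, so
         * check_mul_overflow() returns true instead of silently wrapping. */
        return !check_mul_overflow(xres, yres, &pixels);
}
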
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 1ea0c1f6a1fd..588e02fb91d3 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -4,6 +4,7 @@
#include <linux/virtio_config.h>
#include <linux/module.h>
#include <linux/idr.h>
+#include <linux/of.h>
#include <uapi/linux/virtio_ids.h>
/* Unique numbering for virtio devices. */
@@ -292,6 +293,8 @@ static void virtio_dev_remove(struct device *_d)
/* Acknowledge the device's existence again. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+
+ of_node_put(dev->dev.of_node);
}
static struct bus_type virtio_bus = {
@@ -318,6 +321,43 @@ void unregister_virtio_driver(struct virtio_driver *driver)
}
EXPORT_SYMBOL_GPL(unregister_virtio_driver);
+static int virtio_device_of_init(struct virtio_device *dev)
+{
+ struct device_node *np, *pnode = dev_of_node(dev->dev.parent);
+ char compat[] = "virtio,deviceXXXXXXXX";
+ int ret, count;
+
+ if (!pnode)
+ return 0;
+
+ count = of_get_available_child_count(pnode);
+ if (!count)
+ return 0;
+
+ /* There can be only 1 child node */
+ if (WARN_ON(count > 1))
+ return -EINVAL;
+
+ np = of_get_next_available_child(pnode, NULL);
+ if (WARN_ON(!np))
+ return -ENODEV;
+
+ ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
+ BUG_ON(ret >= sizeof(compat));
+
+ if (!of_device_is_compatible(np, compat)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev->dev.of_node = np;
+ return 0;
+
+out:
+ of_node_put(np);
+ return ret;
+}
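
The compatible string is "virtio,device" followed by the virtio device ID in lower-case hex, so a transport node (e.g. virtio-mmio) may carry at most one child node named that way for its backend device. A tiny illustration of the construction used above (the device ID is an example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        char compat[sizeof("virtio,deviceXXXXXXXX")];
        uint32_t device_id = 0x1a; /* e.g. a virtio-fs device */

        snprintf(compat, sizeof(compat), "virtio,device%x", device_id);
        puts(compat); /* prints "virtio,device1a" */
        return 0;
}
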
+
/**
* register_virtio_device - register virtio device
* @dev : virtio device to be registered
@@ -342,6 +382,10 @@ int register_virtio_device(struct virtio_device *dev)
dev->index = err;
dev_set_name(&dev->dev, "virtio%u", dev->index);
+ err = virtio_device_of_init(dev);
+ if (err)
+ goto out_ida_remove;
+
spin_lock_init(&dev->config_lock);
dev->config_enabled = false;
dev->config_change_pending = false;
@@ -362,10 +406,16 @@ int register_virtio_device(struct virtio_device *dev)
*/
err = device_add(&dev->dev);
if (err)
- ida_simple_remove(&virtio_index_ida, dev->index);
+ goto out_of_node_put;
+
+ return 0;
+
+out_of_node_put:
+ of_node_put(dev->dev.of_node);
+out_ida_remove:
+ ida_simple_remove(&virtio_index_ida, dev->index);
out:
- if (err)
- virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
}
EXPORT_SYMBOL_GPL(register_virtio_device);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 47dce91f788c..c22ff0117b46 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -531,8 +531,8 @@ static int init_vqs(struct virtio_balloon *vb)
callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
}
- err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
- vqs, callbacks, names, NULL, NULL);
+ err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
+ callbacks, names, NULL);
if (err)
return err;
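
virtio_find_vqs() is the existing convenience wrapper that forwards to config->find_vqs() with a NULL per-vq context array, so the conversion above changes no behavior. A hedged sketch of typical driver usage (names and callbacks are illustrative):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void demo_rx_done(struct virtqueue *vq) { /* process used buffers */ }
static void demo_tx_done(struct virtqueue *vq) { /* reclaim sent buffers */ }

static int demo_init_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
{
        vq_callback_t *callbacks[] = { demo_rx_done, demo_tx_done };
        static const char * const names[] = { "rx", "tx" };

        /* The trailing NULL means no irq affinity hint. */
        return virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
}
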
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index b91bc810a87e..bef8ad6bf466 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -143,6 +143,8 @@ struct virtio_mem {
* add_memory_driver_managed().
*/
const char *resource_name;
+ /* Memory group identification. */
+ int mgid;
/*
* We don't want to add too much memory if it's not getting onlined,
@@ -626,8 +628,8 @@ static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
addr + size - 1);
/* Memory might get onlined immediately. */
atomic64_add(size, &vm->offline_size);
- rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
- MHP_MERGE_RESOURCE);
+ rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name,
+ MHP_MERGE_RESOURCE | MHP_NID_IS_MGID);
if (rc) {
atomic64_sub(size, &vm->offline_size);
dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
@@ -677,7 +679,7 @@ static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
- rc = remove_memory(vm->nid, addr, size);
+ rc = remove_memory(addr, size);
if (!rc) {
atomic64_sub(size, &vm->offline_size);
/*
@@ -720,7 +722,7 @@ static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
"offlining and removing memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
- rc = offline_and_remove_memory(vm->nid, addr, size);
+ rc = offline_and_remove_memory(addr, size);
if (!rc) {
atomic64_sub(size, &vm->offline_size);
/*
@@ -2569,6 +2571,7 @@ static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
static int virtio_mem_probe(struct virtio_device *vdev)
{
struct virtio_mem *vm;
+ uint64_t unit_pages;
int rc;
BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
@@ -2603,6 +2606,16 @@ static int virtio_mem_probe(struct virtio_device *vdev)
if (rc)
goto out_del_vq;
+ /* use a single dynamic memory group to cover the whole memory device */
+ if (vm->in_sbm)
+ unit_pages = PHYS_PFN(memory_block_size_bytes());
+ else
+ unit_pages = PHYS_PFN(vm->bbm.bb_size);
+ rc = memory_group_register_dynamic(vm->nid, unit_pages);
+ if (rc < 0)
+ goto out_del_resource;
+ vm->mgid = rc;
+
/*
* If we still have memory plugged, we have to unplug all memory first.
* Registering our parent resource makes sure that this memory isn't
@@ -2617,7 +2630,7 @@ static int virtio_mem_probe(struct virtio_device *vdev)
vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
rc = register_memory_notifier(&vm->memory_notifier);
if (rc)
- goto out_del_resource;
+ goto out_unreg_group;
rc = register_virtio_mem_device(vm);
if (rc)
goto out_unreg_mem;
@@ -2631,6 +2644,8 @@ static int virtio_mem_probe(struct virtio_device *vdev)
return 0;
out_unreg_mem:
unregister_memory_notifier(&vm->memory_notifier);
+out_unreg_group:
+ memory_group_unregister(vm->mgid);
out_del_resource:
virtio_mem_delete_resource(vm);
out_del_vq:
@@ -2695,6 +2710,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
} else {
virtio_mem_delete_resource(vm);
kfree_const(vm->resource_name);
+ memory_group_unregister(vm->mgid);
}
/* remove all tracking data - no locking needed */